Example usage of org.apache.geode.cache.PartitionAttributes in the Apache Geode project:
class CacheXml66DUnitTest, method testPartitionedRegionAttributesForEviction.
/**
 * Tests that a Partitioned Region can be created with a named attributes set programmatically for
 * ExpirationAttributes, and that eviction, expiration, and partition attributes survive a
 * round-trip through cache XML generation and re-parsing.
 */
@Test
public void testPartitionedRegionAttributesForEviction() throws Exception {
  final int redundantCopies = 1;
  CacheCreation cache = new CacheCreation();
  // Resource-manager settings are only written to XML for version 6.0 declarations.
  if (getGemFireVersion().equals(CacheXml.VERSION_6_0)) {
    ResourceManagerCreation rm = new ResourceManagerCreation();
    rm.setCriticalHeapPercentage(95);
    cache.setResourceManagerCreation(rm);
  }
  RegionAttributesCreation attrs = new RegionAttributesCreation(cache);
  // Statistics must be enabled for expiration to be honored.
  attrs.setStatisticsEnabled(true);
  RegionAttributes rootAttrs = null;
  ExpirationAttributes expiration = new ExpirationAttributes(60, ExpirationAction.DESTROY);
  CacheXMLPartitionResolver partitionResolver = new CacheXMLPartitionResolver();
  Properties params = new Properties();
  params.setProperty("initial-index-value", "1000");
  params.setProperty("secondary-index-value", "5000");
  partitionResolver.init(params);
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  paf.setRedundantCopies(redundantCopies);
  paf.setTotalMaxMemory(500);
  paf.setLocalMaxMemory(100);
  paf.setPartitionResolver(partitionResolver);
  AttributesFactory fac = new AttributesFactory(attrs);
  // TODO: Move test back to using LRUHeap when config issues have settled
  // if (getGemFireVersion().equals(CacheXml.GEMFIRE_6_0)) {
  // fac.setEvictionAttributes(EvictionAttributes.createLRUHeapAttributes(null,
  // EvictionAction.OVERFLOW_TO_DISK));
  // } else {
  fac.setEvictionAttributes(
      EvictionAttributes.createLRUMemoryAttributes(100, null, EvictionAction.OVERFLOW_TO_DISK));
  // }
  fac.setEntryTimeToLive(expiration);
  fac.setEntryIdleTimeout(expiration);
  fac.setPartitionAttributes(paf.create());
  rootAttrs = fac.create();
  cache.createRegion("parRoot", rootAttrs);

  // Verify the attributes on the declarative (pre-XML) region.
  Region r = cache.getRegion("parRoot");
  assertNotNull(r);
  assertEquals(redundantCopies, r.getAttributes().getPartitionAttributes().getRedundantCopies());
  assertEquals(100, r.getAttributes().getPartitionAttributes().getLocalMaxMemory());
  assertEquals(500, r.getAttributes().getPartitionAttributes().getTotalMaxMemory());
  assertEquals(partitionResolver,
      r.getAttributes().getPartitionAttributes().getPartitionResolver());
  assertEquals(expiration.getTimeout(), r.getAttributes().getEntryIdleTimeout().getTimeout());
  assertEquals(expiration.getTimeout(), r.getAttributes().getEntryTimeToLive().getTimeout());

  // Round-trip through cache XML and verify the attributes survived.
  testXml(cache);
  Cache c = getCache();
  assertNotNull(c);
  Region region = c.getRegion("parRoot");
  assertNotNull(region);
  RegionAttributes regionAttrs = region.getAttributes();
  PartitionAttributes pa = regionAttrs.getPartitionAttributes();
  EvictionAttributes ea = regionAttrs.getEvictionAttributes();
  assertEquals(1, pa.getRedundantCopies());
  assertEquals(100, pa.getLocalMaxMemory());
  assertEquals(500, pa.getTotalMaxMemory());
  assertNotNull(pa.getPartitionResolver().getClass());
  assertEquals(partitionResolver, pa.getPartitionResolver());
  assertEquals(expiration.getTimeout(), regionAttrs.getEntryIdleTimeout().getTimeout());
  assertEquals(expiration.getTimeout(), regionAttrs.getEntryTimeToLive().getTimeout());
  // TODO: Move test back to using LRUHeap when config issues have settled
  // if (getGemFireVersion().equals(CacheXml.GEMFIRE_6_0)) {
  // assertIndexDetailsEquals(ea.getAlgorithm(),EvictionAlgorithm.LRU_HEAP);
  // } else {
  assertEquals(EvictionAlgorithm.LRU_MEMORY, ea.getAlgorithm());
  // }
  assertEquals(EvictionAction.OVERFLOW_TO_DISK, ea.getAction());
}
Example usage of org.apache.geode.cache.PartitionAttributes in the Apache Geode project:
class CacheXml66DUnitTest, method testPartitionedRegionAttributesForMemLruWithoutMaxMem.
/**
 * Tests that a Partitioned Region configured with memory-LRU eviction but no explicit eviction
 * maximum defaults its eviction maximum to the partition's local-max-memory, and that this
 * survives a round-trip through cache XML.
 */
@Test
public void testPartitionedRegionAttributesForMemLruWithoutMaxMem() throws Exception {
  final int redundantCopies = 1;
  CacheCreation cache = new CacheCreation();
  RegionAttributesCreation attrs = new RegionAttributesCreation(cache);
  attrs.setStatisticsEnabled(true);
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  paf.setRedundantCopies(redundantCopies);
  paf.setTotalMaxMemory(500);
  paf.setLocalMaxMemory(100);
  AttributesFactory fac = new AttributesFactory(attrs);
  // No maximum given: the eviction maximum should default to local-max-memory.
  fac.setEvictionAttributes(
      EvictionAttributes.createLRUMemoryAttributes(null, EvictionAction.LOCAL_DESTROY));
  fac.setPartitionAttributes(paf.create());
  cache.createRegion("parRoot", fac.create());

  // Round-trip through cache XML and verify the attributes survived.
  testXml(cache);
  Cache c = getCache();
  assertNotNull(c);
  Region region = c.getRegion("parRoot");
  assertNotNull(region);
  RegionAttributes regionAttrs = region.getAttributes();
  PartitionAttributes pa = regionAttrs.getPartitionAttributes();
  EvictionAttributes ea = regionAttrs.getEvictionAttributes();
  assertEquals(1, pa.getRedundantCopies());
  assertEquals(100, pa.getLocalMaxMemory());
  assertEquals(500, pa.getTotalMaxMemory());
  assertEquals(EvictionAlgorithm.LRU_MEMORY, ea.getAlgorithm());
  assertEquals(EvictionAction.LOCAL_DESTROY, ea.getAction());
  // The unset eviction maximum should have defaulted to local-max-memory.
  assertEquals(pa.getLocalMaxMemory(), ea.getMaximum());
}
Example usage of org.apache.geode.cache.PartitionAttributes in the Apache Geode project:
class CacheXml66DUnitTest, method testPartitionedRegionAttributesForCustomPartitioning.
/**
 * Tests that a region created with a named attributes set programmatically for partition-resolver
 * has the correct attributes, both before and after a round-trip through cache XML.
 */
@Test
public void testPartitionedRegionAttributesForCustomPartitioning() throws Exception {
  CacheCreation cache = new CacheCreation();
  RegionAttributesCreation attrs = new RegionAttributesCreation(cache);
  CacheXMLPartitionResolver partitionResolver = new CacheXMLPartitionResolver();
  Properties params = new Properties();
  params.setProperty("initial-index-value", "1000");
  params.setProperty("secondary-index-value", "5000");
  partitionResolver.init(params);
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  paf.setRedundantCopies(1);
  paf.setTotalMaxMemory(500);
  paf.setLocalMaxMemory(100);
  paf.setPartitionResolver(partitionResolver);
  attrs.setPartitionAttributes(paf.create());
  cache.createRegion("parRoot", attrs);

  // Verify the attributes on the declarative (pre-XML) region.
  Region r = cache.getRegion("parRoot");
  assertEquals(1, r.getAttributes().getPartitionAttributes().getRedundantCopies());
  assertEquals(100, r.getAttributes().getPartitionAttributes().getLocalMaxMemory());
  assertEquals(500, r.getAttributes().getPartitionAttributes().getTotalMaxMemory());
  assertEquals(partitionResolver,
      r.getAttributes().getPartitionAttributes().getPartitionResolver());

  // Round-trip through cache XML and verify the attributes survived.
  testXml(cache);
  Cache c = getCache();
  assertNotNull(c);
  Region region = c.getRegion("parRoot");
  assertNotNull(region);
  RegionAttributes regionAttrs = region.getAttributes();
  PartitionAttributes pa = regionAttrs.getPartitionAttributes();
  assertEquals(1, pa.getRedundantCopies());
  assertEquals(100, pa.getLocalMaxMemory());
  assertEquals(500, pa.getTotalMaxMemory());
  assertNotNull(pa.getPartitionResolver().getClass());
  assertEquals(partitionResolver, pa.getPartitionResolver());
}
Example usage of org.apache.geode.cache.PartitionAttributes in the Apache Geode project:
class DeltaSizingDUnitTest, method doTest.
/**
 * Drives a delta-propagation sizing scenario: two data VMs host a redundant partitioned region
 * behind cache servers, an accessor VM created by {@code accessorFactory} applies deltas, and
 * the entry size and clone counts are checked on both data VMs after each update.
 *
 * @param accessorFactory creates the client/accessor region in vm2 against both server ports
 * @param clone whether cloning is enabled on the data region
 * @param copyOnRead whether copy-on-read is enabled on the caches
 * @throws InterruptedException if interrupted while waiting on a remote invocation
 */
public void doTest(final AccessorFactory accessorFactory, final boolean clone,
    final boolean copyOnRead) throws InterruptedException {
  final Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  VM vm2 = host.getVM(2);

  // Creates the redundant partitioned data region and starts a cache server; returns its port.
  SerializableCallable createDataRegion = new SerializableCallable("createRegion") {
    public Object call() throws Exception {
      Cache cache = getCache();
      cache.setCopyOnRead(copyOnRead);
      AttributesFactory attr = new AttributesFactory();
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(1);
      PartitionAttributes prAttr = paf.create();
      attr.setPartitionAttributes(prAttr);
      attr.setCloningEnabled(clone);
      cache.createRegion("region1", attr.create());
      CacheServer server = cache.addCacheServer();
      int port = AvailablePortHelper.getRandomAvailableTCPPort();
      server.setPort(port);
      server.start();
      return port;
    }
  };
  final Integer port1 = (Integer) vm0.invoke(createDataRegion);
  final Integer port2 = (Integer) vm1.invoke(createDataRegion);

  SerializableRunnable createEmptyRegion = new SerializableRunnable("createRegion") {
    public void run() {
      Cache cache = getCache();
      cache.setCopyOnRead(copyOnRead);
      Region<Integer, TestDelta> region =
          accessorFactory.createRegion(host, cache, port1.intValue(), port2.intValue());
      // This call just creates a bucket. We do an extra serialization on entries that trigger
      // bucket creation. Thats a bug that should get fixed, but for now it's throwing off my
      // assertions. So I'll force the creation of the bucket
      region.put(113, new TestDelta(false, "bogus"));
      // Now put an entry in that we will modify
      region.put(0, new TestDelta(false, "initial"));
    }
  };
  vm2.invoke(createEmptyRegion);

  int clones = 0;
  // Get the object size in both VMS
  long size = checkObjects(vm0, 1, 1, 0, clones);
  assertEquals(size, checkObjects(vm1, 1, 1, 0, clones));

  // Now apply a delta
  vm2.invoke(new SerializableRunnable("update") {
    public void run() {
      Cache cache = getCache();
      Region<Object, TestDelta> region = cache.getRegion("region1");
      region.put(0, new TestDelta(true, "changedAAAAAAAA"));
    }
  });
  clones = 0;
  if (copyOnRead) {
    // 1 clone to read the object when we test it (the object should be in deserialized form)
    clones += 1;
  } else if (clone) {
    // 1 clone copy the object when we modify it (the object should be in serialized form)
    clones += 1;
  }
  // Check to make sure the size hasn't changed
  assertEquals(size, checkObjects(vm0, 1, 1, 1, clones));
  assertEquals(size, checkObjects(vm1, 1, 1, 1, clones));

  // Try another
  vm2.invoke(new SerializableRunnable("update") {
    public void run() {
      Cache cache = getCache();
      Region<Object, TestDelta> region = cache.getRegion("region1");
      region.put(0, new TestDelta(true, "changedBBBBBBB"));
    }
  });
  if (clone || copyOnRead) {
    // 1 clone to copy the object when we apply the delta.
    clones += 1;
  }
  if (copyOnRead) {
    // 1 clone to read the object when we test it
    clones += 1;
  }
  // Check to make sure the size hasn't changed
  assertEquals(size, checkObjects(vm0, 1, 1, 2, clones));
  assertEquals(size, checkObjects(vm1, 1, 1, 2, clones));
}
Example usage of org.apache.geode.cache.PartitionAttributes in the Apache Geode project:
class PartitionedRegionRedundancyZoneDUnitTest, method createPR.
/**
 * Creates a partitioned region named "region1" in the given VM with the requested number of
 * redundant copies, and returns that VM's distributed member id.
 *
 * @param vm the VM in which to create the region
 * @param redundancy the number of redundant copies to configure on the region
 * @return the distributed member id of the VM hosting the new region
 */
protected DistributedMember createPR(VM vm, final int redundancy) throws Exception {
  SerializableCallable createPrRegion = new SerializableCallable("createRegion") {
    public Object call() {
      Cache cache = getCache();
      AttributesFactory attr = new AttributesFactory();
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      // Use the caller-supplied redundancy; it was previously hard-coded to 1,
      // which silently ignored the parameter.
      paf.setRedundantCopies(redundancy);
      PartitionAttributes prAttr = paf.create();
      attr.setPartitionAttributes(prAttr);
      cache.createRegion("region1", attr.create());
      return cache.getDistributedSystem().getDistributedMember();
    }
  };
  return (DistributedMember) vm.invoke(createPrRegion);
}
Aggregations