Use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
The class PartitionedRegionAsSubRegionDUnitTest, method createRegionAttributesForPR.
/**
 * This method sets the passed attributes and returns a RegionAttributes object, which is
 * used to create the region.
 *
 * @param redundancy the number of redundant copies for the partitioned region
 * @param localMaxMem the local max memory for this member, in megabytes
 *
 * @return the region attributes for the partitioned region
 */
protected RegionAttributes createRegionAttributesForPR(int redundancy, int localMaxMem) {
  AttributesFactory attr = new AttributesFactory();
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  PartitionAttributes prAttr =
      paf.setRedundantCopies(redundancy).setLocalMaxMemory(localMaxMem).create();
  attr.setPartitionAttributes(prAttr);
  return attr.create();
}
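For context, a minimal sketch of how a test might consume this helper is shown below; the redundancy and localMaxMem values and the region name "testPR" are illustrative assumptions, not part of the original class.

protected Region createTestPR() {
  Cache cache = getCache();
  // Hypothetical values: 1 redundant copy, 200 MB of local max memory per data store.
  RegionAttributes prAttrs = createRegionAttributesForPR(1, 200);
  return cache.createRegion("testPR", prAttrs); // "testPR" is an assumed region name
}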
Use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
The class PartitionListenerDUnitTest, method createPR.
protected DistributedMember createPR(VM vm, final String regionName, final boolean isAccessor)
    throws Throwable {
  SerializableCallable createPrRegion = new SerializableCallable("createRegion") {
    public Object call() {
      Cache cache = getCache();
      AttributesFactory attr = new AttributesFactory();
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(1);
      if (isAccessor) {
        // A localMaxMemory of 0 makes this member an accessor: it hosts no bucket data.
        paf.setLocalMaxMemory(0);
      }
      paf.addPartitionListener(new TestPartitionListener());
      PartitionAttributes prAttr = paf.create();
      attr.setPartitionAttributes(prAttr);
      cache.createRegion(regionName, attr.create());
      return cache.getDistributedSystem().getDistributedMember();
    }
  };
  return (DistributedMember) vm.invoke(createPrRegion);
}
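The TestPartitionListener referenced above is not shown in this excerpt. A minimal stand-in, extending the public org.apache.geode.cache.partition.PartitionListenerAdapter, might look like the sketch below; what it records (primary bucket ids) is an assumption about what such a test listener could track.

import java.util.Set;
import java.util.concurrent.ConcurrentSkipListSet;
import org.apache.geode.cache.partition.PartitionListenerAdapter;

// Hypothetical stand-in for TestPartitionListener: records the buckets for which
// this member has become primary, so a test can assert on listener callbacks.
public class TestPartitionListener extends PartitionListenerAdapter {
  private final Set<Integer> primaryBuckets = new ConcurrentSkipListSet<>();

  @Override
  public void afterPrimary(int bucketId) {
    primaryBuckets.add(bucketId);
  }

  public Set<Integer> getPrimaryBuckets() {
    return primaryBuckets;
  }
}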
Use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
The class PartitionedRegionStatsDUnitTest, method testDataStoreEntryCountWithRebalance.
@Test
public void testDataStoreEntryCountWithRebalance() throws InterruptedException {
  // Ok, first problem, GC'd tombstone is counted as an entry
  // To test
  // - modifying a tombstone
  // - modifying and doing tombstone GC?
  final Host host = Host.getHost(0);
  final VM vm0 = host.getVM(0);
  final VM vm1 = host.getVM(1);
  SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {
    public void run() {
      Cache cache = getCache();
      AttributesFactory attr = new AttributesFactory();
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(0);
      PartitionAttributes prAttr = paf.create();
      attr.setPartitionAttributes(prAttr);
      cache.createRegion("region1", attr.create());
      RebalanceOperation op = cache.getResourceManager().createRebalanceFactory().start();
      try {
        // getResults() blocks until the rebalance completes
        RebalanceResults results = op.getResults();
      } catch (Exception e) {
        Assert.fail("ex", e);
      }
    }
  };
  vm0.invoke(createPrRegion);
  vm0.invoke(new SerializableRunnable("Put some data") {
    public void run() {
      Cache cache = getCache();
      Region region = cache.getRegion("region1");
      region.put(Long.valueOf(0), "A");
      region.put(Long.valueOf(1), "A");
      region.put(Long.valueOf(113), "A");
      region.put(Long.valueOf(114), "A");
      region.destroy(Long.valueOf(0));
      region.destroy(Long.valueOf(1));
    }
  });
  vm1.invoke(createPrRegion);
  validateEntryCount(vm0, 1);
  validateEntryCount(vm1, 1);
}
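The validateEntryCount helper is not included in this excerpt. A plausible sketch consistent with how these tests call it follows; the exact stat accessor, PartitionedRegionStats.getDataStoreEntryCount(), is an assumption about the internal API.

// Hypothetical sketch of validateEntryCount: in the given VM, compare the data-store
// entry count stat of the partitioned region against the expected value.
private void validateEntryCount(VM vm, final long expectedCount) {
  vm.invoke(new SerializableRunnable("validate entry count") {
    public void run() {
      Cache cache = getCache();
      PartitionedRegion region = (PartitionedRegion) cache.getRegion("region1");
      PartitionedRegionStats stats = region.getPrStats(); // internal API, as used elsewhere in the test
      assertEquals(expectedCount, stats.getDataStoreEntryCount());
    }
  });
}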
Use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
The class PartitionedRegionStatsDUnitTest, method testDataStoreEntryCount.
/**
* Test to make sure the datastore entry count is accurate.
*
* @throws InterruptedException
*/
@Test
public void testDataStoreEntryCount() throws InterruptedException {
  // Ok, first problem, GC'd tombstone is counted as an entry
  // To test
  // - modifying a tombstone
  // - modifying and doing tombstone GC?
  final Host host = Host.getHost(0);
  final VM vm0 = host.getVM(0);
  final VM vm1 = host.getVM(1);
  final VM vm2 = host.getVM(2);
  SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {
    public void run() {
      Cache cache = getCache();
      AttributesFactory attr = new AttributesFactory();
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(2);
      PartitionAttributes prAttr = paf.create();
      attr.setPartitionAttributes(prAttr);
      cache.createRegion("region1", attr.create());
    }
  };
  vm0.invoke(createPrRegion);
  vm1.invoke(createPrRegion);
  vm0.invoke(new SerializableRunnable("Put some data") {
    public void run() {
      Cache cache = getCache();
      Region region = cache.getRegion("region1");
      region.put(Long.valueOf(0), "A");
      region.put(Long.valueOf(1), "A");
      region.put(Long.valueOf(113), "A");
      region.put(Long.valueOf(226), "A");
    }
  });
  validateEntryCount(vm0, 4);
  validateEntryCount(vm1, 4);
  // Do a destroy
  vm0.invoke(new SerializableRunnable("Put some data") {
    public void run() {
      Cache cache = getCache();
      Region region = cache.getRegion("region1");
      region.destroy(Long.valueOf(0));
    }
  });
  // We expect the tombstone won't be recorded as part of the entry count
  validateEntryCount(vm0, 3);
  validateEntryCount(vm1, 3);
  // Destroy and modify a tombstone
  vm0.invoke(new SerializableRunnable("Put some data") {
    public void run() {
      Cache cache = getCache();
      Region region = cache.getRegion("region1");
      region.destroy(Long.valueOf(113));
      region.put(Long.valueOf(113), "B");
    }
  });
  validateEntryCount(vm0, 3);
  validateEntryCount(vm1, 3);
  // After GII (which might include the tombstone), a new member
  // should still see only 3 live entries.
  vm2.invoke(createPrRegion);
  // Wait for redundancy to be restored. Once it is, the entry count should be 3.
  vm2.invoke(new SerializableRunnable("validate stats") {
    public void run() {
      Cache cache = getCache();
      PartitionedRegion region = (PartitionedRegion) cache.getRegion("region1");
      final PartitionedRegionStats stats = region.getPrStats();
      Wait.waitForCriterion(new WaitCriterion() {
        @Override
        public boolean done() {
          return stats.getLowRedundancyBucketCount() == 0;
        }

        @Override
        public String description() {
          return "Redundancy was not satisfied " + stats.getLowRedundancyBucketCount();
        }
      }, 20000, 100, true);
    }
  });
  validateEntryCount(vm2, 3);
  // A tombstone GC shouldn't affect the count.
  vm0.invoke(new SerializableRunnable("Put some data") {
    public void run() {
      GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
      TombstoneService tombstoneService = cache.getTombstoneService();
      try {
        tombstoneService.forceBatchExpirationForTests(1);
      } catch (InterruptedException e) {
        Assert.fail("interrupted", e);
      }
    }
  });
  validateEntryCount(vm0, 3);
  validateEntryCount(vm1, 3);
  validateEntryCount(vm2, 3);
}
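As a side note, the redundancy check in this test reads the internal PartitionedRegionStats. A rough public-API equivalent, sketched below under the assumption that it is acceptable for this check, uses org.apache.geode.cache.partition.PartitionRegionHelper instead.

import org.apache.geode.cache.partition.PartitionRegionHelper;
import org.apache.geode.cache.partition.PartitionRegionInfo;

// Sketch: determine whether redundancy has been restored via the public
// PartitionRegionHelper API rather than the internal PartitionedRegionStats used above.
private boolean isRedundancySatisfied(Cache cache) {
  PartitionRegionInfo info =
      PartitionRegionHelper.getPartitionRegionInfo(cache.getRegion("region1"));
  return info.getLowRedundancyBucketCount() == 0;
}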
Use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
The class PartitionedRegionTestUtilsDUnitTest, method testGetBucketKeys.
/**
 * Test the test method PartitionedRegion.getBucketKeys. Verify that it returns the keys that
 * have been placed into each bucket of the PartitionedRegion.
 *
 * @see PartitionedRegion#getBucketKeys(int)
 */
@Test
public void testGetBucketKeys() throws Exception {
  final String r = getUniqueName();
  Host host = Host.getHost(0);
  VM vm2 = host.getVM(2);
  VM vm3 = host.getVM(3);
  CacheSerializableRunnable create = new CacheSerializableRunnable("CreatePR") {
    public void run2() throws CacheException {
      Cache cache = getCache();
      AttributesFactory attr = new AttributesFactory();
      attr.setPartitionAttributes(
          new PartitionAttributesFactory().setTotalNumBuckets(totalNumBuckets).create());
      PartitionedRegion p = (PartitionedRegion) cache.createRegion(r, attr.create());
      assertNotNull(p);
    }
  };
  vm2.invoke(create);
  vm3.invoke(create);
  // Create an accessor
  Cache cache = getCache();
  AttributesFactory attr = new AttributesFactory();
  attr.setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(totalNumBuckets)
      .setLocalMaxMemory(0).create());
  PartitionedRegion p = (PartitionedRegion) cache.createRegion(r, attr.create());
  assertNotNull(p);
  final int totalBucks = p.getTotalNumberOfBuckets();
  for (int i = totalBucks - 1; i >= 0; i--) {
    Set s = p.getBucketKeys(i);
    assertTrue(s.isEmpty());
  }
  class TestPRKey implements Serializable {
    int hashCode;
    int differentiator;

    TestPRKey(int hash, int differentiator) {
      this.hashCode = hash;
      this.differentiator = differentiator;
    }

    public int hashCode() {
      return hashCode;
    }

    public boolean equals(Object obj) {
      if (!(obj instanceof TestPRKey)) {
        return false;
      }
      return ((TestPRKey) obj).differentiator == this.differentiator;
    }

    public String toString() {
      return "TestPRKey " + hashCode + " diff " + differentiator;
    }
  }
  TestPRKey key;
  Integer val;
  // Create keys assuming the bucket is chosen by a modulus of the key's hashCode.
  // Bucket zero gets three keys here; every other bucket gets exactly one key below.
  p.put(new TestPRKey(0, 1), new Integer(0));
  p.put(new TestPRKey(0, 2), new Integer(1));
  p.put(new TestPRKey(0, 3), new Integer(2));
  Set s = p.getBucketKeys(0);
  assertEquals(3, s.size());
  for (Object k : s) {
    assertEquals(0, ((TestPRKey) k).hashCode());
  }
  // Skip bucket zero since we have three keys there, but fill out all the rest with keys
  for (int i = totalBucks - 1; i > 0; i--) {
    key = new TestPRKey(i, 0);
    val = new Integer(i);
    p.put(key, val);
    // Integer gottenVal = (Integer) p.get(key);
    // assertEquals("Value for key: " + key + " val " + gottenVal + " wasn't expected " + val,
    //     val, gottenVal);
  }
  // Assert that the proper number of keys are placed in each bucket
  for (int i = 1; i < totalBucks; i++) {
    s = p.getBucketKeys(i);
    assertEquals(1, s.size());
    key = (TestPRKey) s.iterator().next();
    assertEquals(i, key.hashCode());
    // assertEquals(new Integer(i), p.get(key));
  }
}
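The TestPRKey trick above relies on the routing assumption stated in the comment: with the default resolver, the bucket id is effectively the key's hashCode modulo the total bucket count. A minimal illustration of that assumption follows; the helper name and the example bucket count of 13 are hypothetical.

// Sketch of the routing assumption behind TestPRKey: a key whose hashCode is less than
// totalNumBuckets is expected to land in the bucket with that same number.
static int expectedBucketId(Object key, int totalNumBuckets) {
  return Math.abs(key.hashCode() % totalNumBuckets);
}
// e.g. with a hypothetical totalNumBuckets of 13, new TestPRKey(5, 0) maps to bucket 5.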