Example usage of org.apache.geode.internal.cache.PartitionedRegionDataStore.BucketVisitor in the Apache Geode project, taken from the class PartitionedRegionBucketCreationDistributionDUnitTest, method validateBucketsDistribution.
/**
 * Builds a runnable that validates bucket distribution for the partitioned regions whose
 * name index lies in [startIndexForRegion, endIndexForRegion): each region must manage at
 * least {@code noBucketsExpectedOnEachNode} buckets locally, and every bucket it manages
 * must correspond to the matching bucket subregion under the PR root region.
 *
 * @param startIndexForRegion first region index to check (inclusive)
 * @param endIndexForRegion last region index to check (exclusive)
 * @param noBucketsExpectedOnEachNode minimum number of buckets each member must host
 * @return a CacheSerializableRunnable suitable for VM.invoke()
 */
private CacheSerializableRunnable validateBucketsDistribution(final int startIndexForRegion, final int endIndexForRegion, final int noBucketsExpectedOnEachNode) {
  CacheSerializableRunnable validateBucketDist = new CacheSerializableRunnable("validateBucketsDistribution") {
    // Capture the prefix when the runnable is created so the serialized copy carries
    // this VM's value instead of reading a (possibly different) remote static.
    String innerPrPrefix = prPrefix;

    public void run2() {
      Cache cache = getCache();
      final Region root = cache.getRegion(PartitionedRegionHelper.PR_ROOT_REGION_NAME);
      assertNotNull("Root regions is null", root);
      for (int i = startIndexForRegion; i < endIndexForRegion; i++) {
        final String regionPath = Region.SEPARATOR + innerPrPrefix + i;
        final PartitionedRegion pr = (PartitionedRegion) cache.getRegion(regionPath);
        // Build the message from the path, not pr.getName(): dereferencing pr here
        // would throw an NPE before the assertion could report the null region.
        assertNotNull("This region can not be null " + regionPath, pr);
        assertNotNull(pr.getDataStore());
        final int localBSize = pr.getDataStore().getBucketsManaged();
        LogWriterUtils.getLogWriter().info("validateBucketsDistribution() - Number of bukctes for " + pr.getName() + " : " + localBSize);
        assertTrue("Bucket Distribution for region = " + pr.getFullPath() + " is not correct for member " + pr.getDistributionManager().getId() + " existing size " + localBSize + " smaller than expected " + noBucketsExpectedOnEachNode, localBSize >= noBucketsExpectedOnEachNode);
        // Each locally managed bucket must be backed by the identically named
        // subregion of the PR root region.
        pr.getDataStore().visitBuckets(new BucketVisitor() {
          public void visit(Integer bucketId, Region r) {
            Region bucketRegion = root.getSubregion(pr.getBucketName(bucketId.intValue()));
            assertEquals(bucketRegion.getFullPath(), r.getFullPath());
          }
        });
      }
    }
  };
  return validateBucketDist;
}
Example usage of org.apache.geode.internal.cache.PartitionedRegionDataStore.BucketVisitor in the Apache Geode project, taken from the class Bug38741DUnitTest, method testPartitionedRegionAndCopyOnRead.
/**
 * Test to ensure that a PartitionedRegion doesn't make more than the expected number of copies
 * when copy-on-read is set to true.
 *
 * The test asserts exact serialization counts at each step, so it is sensitive to the
 * precise order of puts/gets across the accessor and datastore VMs.
 *
 * @throws Exception
 */
@Test
public void testPartitionedRegionAndCopyOnRead() throws Exception {
  final Host h = Host.getHost(0);
  final VM accessor = h.getVM(2);
  final VM datastore = h.getVM(3);
  final String rName = getUniqueName();
  final String k1 = "k1";
  // VM 3 hosts the data (redundancy 0, so exactly one copy of each bucket).
  datastore.invoke(new CacheSerializableRunnable("Create PR DataStore") {
    public void run2() throws CacheException {
      AttributesFactory factory = new AttributesFactory();
      factory.setPartitionAttributes(new PartitionAttributesFactory().setRedundantCopies(0).create());
      createRootRegion(rName, factory.create());
    }
  });
  // VM 2 is a pure accessor (localMaxMemory 0): all values live remotely on the datastore.
  accessor.invoke(new CacheSerializableRunnable("Create PR Accessor and put new value") {
    public void run2() throws CacheException {
      AttributesFactory factory = new AttributesFactory();
      factory.setPartitionAttributes(new PartitionAttributesFactory().setLocalMaxMemory(0).setRedundantCopies(0).create());
      Region r = createRootRegion(rName, factory.create());
      SerializationCountingValue val = new SerializationCountingValue();
      r.put(k1, val);
      // First put to a bucket will serialize once to determine the size of the value
      // to know how much extra space the new bucket with the new entry will consume
      // and serialize again to send the bytes
      assertEquals(2, val.count.get());
      // A put to an already created bucket should only be serialized once
      val = new SerializationCountingValue();
      r.put(k1, val);
      assertEquals(1, val.count.get());
    }
  });
  // On the datastore, read the stored entry bytes directly and confirm the value
  // was serialized exactly once on its way in.
  datastore.invoke(new CacheSerializableRunnable("assert datastore entry serialization count") {
    public void run2() throws CacheException {
      PartitionedRegion pr = (PartitionedRegion) getRootRegion(rName);
      // Visit the one bucket (since there is only one value in the entire PR)
      // to directly copy the entry bytes and assert the serialization count.
      // All this extra work is to assure the serialization count does not increase
      // (by de-serializing the value stored in the map, which would then have to be
      // re-serialized).
      pr.getDataStore().visitBuckets(new BucketVisitor() {
        public void visit(Integer bucketId, Region r) {
          BucketRegion br = (BucketRegion) r;
          try {
            KeyInfo keyInfo = new KeyInfo(k1, null, bucketId);
            RawValue rv = br.getSerialized(keyInfo, false, false, null, null, false);
            Object val = rv.getRawValue();
            assertTrue(val instanceof CachedDeserializable);
            CachedDeserializable cd = (CachedDeserializable) val;
            SerializationCountingValue scv = (SerializationCountingValue) cd.getDeserializedForReading();
            assertEquals(1, scv.count.get());
          } catch (IOException fail) {
            Assert.fail("Unexpected IOException", fail);
          }
        }
      });
    }
  });
  // On the accessor, a remote get deserializes bytes shipped from the datastore;
  // copy-on-read must not add a serialization round-trip there.
  accessor.invoke(new CacheSerializableRunnable("assert accessor entry serialization count") {
    public void run2() throws CacheException {
      Region r = getRootRegion(rName);
      SerializationCountingValue v1 = (SerializationCountingValue) r.get(k1);
      // The counter was incremented once to send the data to the datastore
      assertEquals(1, v1.count.get());
      getCache().setCopyOnRead(true);
      // Once to send the data to the datastore, no need to do a serialization
      // when we make copy since it is serialized from datastore to us.
      SerializationCountingValue v2 = (SerializationCountingValue) r.get(k1);
      assertEquals(1, v2.count.get());
      // copy-on-read must hand back a distinct instance
      assertTrue(v1 != v2);
    }
  });
  // On the datastore, a local get with copy-on-read DOES cost one extra
  // serialization to produce the local copy.
  datastore.invoke(new CacheSerializableRunnable("assert value serialization") {
    public void run2() throws CacheException {
      Region r = getRootRegion(rName);
      SerializationCountingValue v1 = (SerializationCountingValue) r.get(k1);
      // Once to send the value from the accessor to the data store
      assertEquals(1, v1.count.get());
      getCache().setCopyOnRead(true);
      // Once to send the value from the accessor to the data store
      // once to make a local copy
      SerializationCountingValue v2 = (SerializationCountingValue) r.get(k1);
      assertEquals(2, v2.count.get());
      // copy-on-read must hand back a distinct instance
      assertTrue(v1 != v2);
    }
  });
}
Example usage of org.apache.geode.internal.cache.PartitionedRegionDataStore.BucketVisitor in the Apache Geode project, taken from the class SerializableMonth (test), method partitionedRegionTest.
/**
 * Exercises put() operations against the partitioned region named {@code prName} from four
 * VMs, then verifies on each VM that every key it created is findable and that each key
 * routes (via the region's PartitionResolver routing object) to the bucket that actually
 * holds it. vm0-vm2 are expected to host data stores; vm3 must be a pure accessor
 * (getDataStore() == null).
 *
 * NOTE: the anonymous runnables intentionally reference the static listOfKeysN fields
 * directly (rather than captured parameters) so that, after DUnit serialization, the
 * remote VM reads/writes its own per-VM static copy.
 *
 * @param prName name of the partitioned region to exercise; fail()s if absent in a VM
 */
public void partitionedRegionTest(final String prName) {
  /*
   * Do put() operations through VM with PR having both Accessor and Datastore
   */
  vm0.invoke(new CacheSerializableRunnable("doPutCreateInvalidateOperations1") {
    public void run2() throws CacheException {
      Calendar cal = Calendar.getInstance();
      final Region pr = cache.getRegion(prName);
      if (pr == null) {
        fail(prName + " not created");
      }
      // A freshly created PR must report itself empty before any puts.
      int size = pr.size();
      assertEquals("Size doesnt return expected value", 0, size);
      assertEquals("isEmpty doesnt return proper state of the PartitionedRegion", true, pr.isEmpty());
      assertEquals(0, pr.keySet().size());
      for (int i = 0; i <= 11; i++) {
        // Build a pseudo-random Date key; i doubles as the month so each
        // iteration lands in a distinct month.
        int yr = (int) (Math.random() * 2100);
        int month = i;
        int date = (int) (Math.random() * 30);
        cal.set(yr, month, date);
        Object key = cal.getTime();
        listOfKeys1.add(key);
        assertNotNull(pr);
        pr.put(key, Integer.toString(i));
        assertEquals(Integer.toString(i), pr.get(key));
      }
      PartitionedRegion ppr = (PartitionedRegion) pr;
      try {
        ppr.dumpAllBuckets(false);
      } catch (ReplyException re) {
        Assert.fail("dumpAllBuckets", re);
      }
    }
  });
  vm1.invoke(new CacheSerializableRunnable("doPutCreateInvalidateOperations2") {
    public void run2() throws CacheException {
      Calendar cal = Calendar.getInstance();
      final Region pr = cache.getRegion(prName);
      if (pr == null) {
        fail(prName + " not created");
      }
      for (int i = 0; i <= 11; i++) {
        int yr = (int) (Math.random() * 2200);
        int month = i;
        int date = (int) (Math.random() * 30);
        cal.set(yr, month, date);
        Object key = cal.getTime();
        listOfKeys2.add(key);
        assertNotNull(pr);
        pr.put(key, Integer.toString(i));
        assertEquals(Integer.toString(i), pr.get(key));
      }
      PartitionedRegion ppr = (PartitionedRegion) pr;
      try {
        ppr.dumpAllBuckets(false);
      } catch (ReplyException re) {
        Assert.fail("dumpAllBuckets", re);
      }
    }
  });
  vm2.invoke(new CacheSerializableRunnable("doPutCreateInvalidateOperations2") {
    public void run2() throws CacheException {
      Calendar cal = Calendar.getInstance();
      final Region pr = cache.getRegion(prName);
      if (pr == null) {
        fail(prName + " not created");
      }
      for (int i = 0; i <= 11; i++) {
        int yr = (int) (Math.random() * 2300);
        int month = i;
        int date = (int) (Math.random() * 30);
        cal.set(yr, month, date);
        Object key = cal.getTime();
        listOfKeys3.add(key);
        assertNotNull(pr);
        pr.put(key, Integer.toString(i));
        assertEquals(Integer.toString(i), pr.get(key));
      }
      PartitionedRegion ppr = (PartitionedRegion) pr;
      try {
        ppr.dumpAllBuckets(false);
      } catch (ReplyException re) {
        Assert.fail("dumpAllBuckets", re);
      }
    }
  });
  vm3.invoke(new CacheSerializableRunnable("doPutCreateInvalidateOperations3") {
    public void run2() throws CacheException {
      Calendar cal = Calendar.getInstance();
      final Region pr = cache.getRegion(prName);
      if (pr == null) {
        fail(prName + " not created");
      }
      for (int i = 0; i <= 11; i++) {
        int yr = (int) (Math.random() * 2400);
        int month = i;
        int date = (int) (Math.random() * 30);
        cal.set(yr, month, date);
        Object key = cal.getTime();
        listOfKeys4.add(key);
        assertNotNull(pr);
        pr.put(key, Integer.toString(i));
        assertEquals(Integer.toString(i), pr.get(key));
      }
      PartitionedRegion ppr = (PartitionedRegion) pr;
      try {
        ppr.dumpAllBuckets(false);
      } catch (ReplyException re) {
        Assert.fail("dumpAllBuckets", re);
      }
    }
  });
  // Verification phase: each datastore VM checks its own keys are present and that
  // the resolver's routing object hashes each stored key to its hosting bucket.
  vm0.invoke(new CacheSerializableRunnable("verifyKeysonVM0") {
    public void run2() throws CacheException {
      final PartitionedRegion pr = (PartitionedRegion) cache.getRegion(prName);
      if (pr == null) {
        fail(prName + " not created");
      }
      Iterator itr = listOfKeys1.iterator();
      while (itr.hasNext()) {
        assertTrue(searchForKey(pr, (Date) itr.next()));
      }
      pr.getDataStore().visitBuckets(new BucketVisitor() {
        public void visit(Integer bucketId, Region r) {
          Set s = pr.getBucketKeys(bucketId.intValue());
          Iterator it = s.iterator();
          while (it.hasNext()) {
            EntryOperation eo = new EntryOperationImpl(pr, null, it.next(), null, null);
            PartitionResolver rr = pr.getPartitionResolver();
            Object o = rr.getRoutingObject(eo);
            // NOTE(review): hashCode() % totalNumBuckets can be negative for a
            // negative hash; this mirrors the PR's routing computation — confirm.
            Integer i = Integer.valueOf(o.hashCode() % totalNumBuckets);
            assertEquals(bucketId, i);
          }
        }
      });
    }
  });
  vm1.invoke(new CacheSerializableRunnable("verifyKeysonVM1") {
    public void run2() throws CacheException {
      final PartitionedRegion pr = (PartitionedRegion) cache.getRegion(prName);
      if (pr == null) {
        fail(prName + " not created");
      }
      Iterator itr = listOfKeys2.iterator();
      while (itr.hasNext()) {
        assertTrue(searchForKey(pr, (Date) itr.next()));
      }
      pr.getDataStore().visitBuckets(new BucketVisitor() {
        public void visit(Integer bucketId, Region r) {
          Set s = pr.getBucketKeys(bucketId.intValue());
          Iterator it = s.iterator();
          while (it.hasNext()) {
            EntryOperation eo = new EntryOperationImpl(pr, null, it.next(), null, null);
            PartitionResolver rr = pr.getPartitionResolver();
            Object o = rr.getRoutingObject(eo);
            Integer i = Integer.valueOf(o.hashCode() % totalNumBuckets);
            assertEquals(bucketId, i);
          }
        }
      });
    }
  });
  vm2.invoke(new CacheSerializableRunnable("verifyKeysonVM2") {
    public void run2() throws CacheException {
      final PartitionedRegion pr = (PartitionedRegion) cache.getRegion(prName);
      if (pr == null) {
        fail(prName + " not created");
      }
      Iterator itr = listOfKeys3.iterator();
      while (itr.hasNext()) {
        assertTrue(searchForKey(pr, (Date) itr.next()));
      }
      pr.getDataStore().visitBuckets(new BucketVisitor() {
        public void visit(Integer bucketId, Region r) {
          Set s = pr.getBucketKeys(bucketId.intValue());
          Iterator it = s.iterator();
          while (it.hasNext()) {
            EntryOperation eo = new EntryOperationImpl(pr, null, it.next(), null, null);
            PartitionResolver rr = pr.getPartitionResolver();
            Object o = rr.getRoutingObject(eo);
            Integer i = Integer.valueOf(o.hashCode() % totalNumBuckets);
            assertEquals(bucketId, i);
          }
        }
      });
    }
  });
  vm3.invoke(new CacheSerializableRunnable("verifyKeysonVM3") {
    public void run2() throws CacheException {
      final PartitionedRegion pr = (PartitionedRegion) cache.getRegion(prName);
      if (pr == null) {
        fail(prName + " not created");
      }
      Iterator itr = listOfKeys4.iterator();
      while (itr.hasNext()) {
        assertTrue(searchForKey(pr, (Date) itr.next()));
      }
      // vm3 is a pure accessor: it must host no data store at all.
      assertEquals(pr.getDataStore(), null);
    }
  });
}
Aggregations