Usage example of org.apache.geode.cache.PartitionAttributes from the Apache Geode project.
Taken from class PRColocationDUnitTest, method testColocatedPRWithAccessorOnDifferentNode2.
@Test
public void testColocatedPRWithAccessorOnDifferentNode2() throws Throwable {
  createCacheInAllVms();

  // Create the parent "customer" PR as a pure accessor (localMaxMemory == 0) on dataStore1.
  dataStore1.invoke(new CacheSerializableRunnable("testColocatedPRWithAccessorOnDifferentNode2") {
    @Override
    public void run2() {
      String partitionedRegionName = CustomerPartitionedRegionName;
      colocatedWith = null;
      // Autoboxing / cached wrappers instead of the deprecated wrapper constructors
      // (new Boolean(...) / new Integer(...)).
      isPartitionResolver = Boolean.FALSE;
      redundancy = 0;
      localMaxmemory = 0; // 0 => accessor: this member hosts no buckets
      totalNumBuckets = 11;
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy).setLocalMaxMemory(localMaxmemory)
          .setTotalNumBuckets(totalNumBuckets).setColocatedWith(colocatedWith);
      if (isPartitionResolver) {
        paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
      }
      PartitionAttributes prAttr = paf.create();
      AttributesFactory attr = new AttributesFactory();
      attr.setPartitionAttributes(prAttr);
      assertNotNull(basicGetCache());
      Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
      assertNotNull(pr);
      LogWriterUtils.getLogWriter().info("Partitioned Region " + partitionedRegionName
          + " created Successfully :" + pr.toString());
    }
  });

  // The colocated child PR is a data store (localMaxMemory > 0) while its parent is only an
  // accessor on this node, so region creation must fail. Register the expected exception
  // string first so the dunit framework ignores it in the logs.
  final IgnoredException ex = IgnoredException.addIgnoredException(
      "Colocated regions should have accessors at the same node", dataStore1);
  dataStore1.invoke(new CacheSerializableRunnable("Colocated PR with accessor on different nodes") {
    @Override
    public void run2() {
      regionName = OrderPartitionedRegionName;
      colocatedWith = CustomerPartitionedRegionName;
      isPartitionResolver = Boolean.FALSE;
      redundancy = 0;
      localMaxmemory = 50; // data store, unlike the accessor parent above
      totalNumBuckets = 11;
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy).setLocalMaxMemory(localMaxmemory)
          .setTotalNumBuckets(totalNumBuckets).setColocatedWith(colocatedWith);
      if (isPartitionResolver) {
        paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
      }
      PartitionAttributes prAttr = paf.create();
      AttributesFactory attr = new AttributesFactory();
      attr.setPartitionAttributes(prAttr);
      assertNotNull(basicGetCache());
      try {
        basicGetCache().createRegion(regionName, attr.create());
        fail("It should have failed with Exception: Colocated regions "
            + "should have accessors at the same node");
      } catch (Exception expected) {
        LogWriterUtils.getLogWriter().info("Expected Message : " + expected.getMessage());
        assertTrue(expected.getMessage()
            .startsWith("Colocated regions should have accessors at the same node"));
      }
    }
  });
  ex.remove();
}
Usage example of org.apache.geode.cache.PartitionAttributes from the Apache Geode project.
Taken from class PRColocationDUnitTest, method testColocatedPRWithPROnDifferentNode1.
@Test
public void testColocatedPRWithPROnDifferentNode1() throws Throwable {
  createCacheInAllVms();

  // Create the parent "customer" PR as a data store on dataStore1 only.
  dataStore1.invoke(new CacheSerializableRunnable("TestColocatedPRWithPROnDifferentNode") {
    @Override
    public void run2() {
      String partitionedRegionName = CustomerPartitionedRegionName;
      colocatedWith = null;
      // Autoboxing / cached wrappers instead of the deprecated wrapper constructors.
      isPartitionResolver = Boolean.FALSE;
      redundancy = 0;
      localMaxmemory = 20;
      totalNumBuckets = 11;
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy).setLocalMaxMemory(localMaxmemory)
          .setTotalNumBuckets(totalNumBuckets).setColocatedWith(colocatedWith);
      if (isPartitionResolver) {
        paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
      }
      PartitionAttributes prAttr = paf.create();
      AttributesFactory attr = new AttributesFactory();
      attr.setPartitionAttributes(prAttr);
      assertNotNull(basicGetCache());
      Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
      assertNotNull(pr);
      LogWriterUtils.getLogWriter().info("Partitioned Region " + partitionedRegionName
          + " created Successfully :" + pr.toString());
    }
  });

  // Create the same parent PR as a data store on dataStore2 as well.
  dataStore2.invoke(new CacheSerializableRunnable("testColocatedPRwithPROnDifferentNode") {
    @Override
    public void run2() {
      String partitionedRegionName = CustomerPartitionedRegionName;
      colocatedWith = null;
      isPartitionResolver = Boolean.FALSE;
      redundancy = 0;
      localMaxmemory = 20;
      totalNumBuckets = 11;
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy).setLocalMaxMemory(localMaxmemory)
          .setTotalNumBuckets(totalNumBuckets).setColocatedWith(colocatedWith);
      if (isPartitionResolver) {
        paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
      }
      PartitionAttributes prAttr = paf.create();
      AttributesFactory attr = new AttributesFactory();
      attr.setPartitionAttributes(prAttr);
      assertNotNull(basicGetCache());
      Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
      assertNotNull(pr);
      LogWriterUtils.getLogWriter().info("Partitioned Region " + partitionedRegionName
          + " created Successfully :" + pr.toString());
    }
  });

  // On dataStore2 the colocated child PR exists only here (not on dataStore1), so bucket
  // creation must fail when the first put is issued. Register the expected exception first.
  final IgnoredException ex =
      IgnoredException.addIgnoredException("Cannot create buckets", dataStore2);
  dataStore2.invoke(new CacheSerializableRunnable("Colocated PR with PR on different node") {
    @Override
    public void run2() {
      regionName = OrderPartitionedRegionName;
      colocatedWith = CustomerPartitionedRegionName;
      isPartitionResolver = Boolean.FALSE;
      redundancy = 0;
      localMaxmemory = 50;
      totalNumBuckets = 11;
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy).setLocalMaxMemory(localMaxmemory)
          .setTotalNumBuckets(totalNumBuckets).setColocatedWith(colocatedWith);
      if (isPartitionResolver) {
        paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
      }
      PartitionAttributes prAttr = paf.create();
      AttributesFactory attr = new AttributesFactory();
      attr.setPartitionAttributes(prAttr);
      assertNotNull(basicGetCache());
      try {
        // Region creation itself succeeds; the colocation violation is only detected when
        // the first bucket is created by the put below.
        Region r = basicGetCache().createRegion(regionName, attr.create());
        r.put("key", "value");
        fail("Failed because we did not receive the exception - : Cannot create buckets, "
            + "as colocated regions are not configured to be at the same nodes.");
      } catch (Exception expected) {
        LogWriterUtils.getLogWriter().info("Expected Message : " + expected.getMessage());
        assertTrue(expected.getMessage().contains("Cannot create buckets, as "
            + "colocated regions are not configured to be at the same nodes."));
      }
    }
  });
  ex.remove();

  // On dataStore1 both parent and child are now present, so the same create + put succeeds.
  dataStore1.invoke(new CacheSerializableRunnable("Colocated PR with PR on different node") {
    @Override
    public void run2() {
      regionName = OrderPartitionedRegionName;
      colocatedWith = CustomerPartitionedRegionName;
      isPartitionResolver = Boolean.FALSE;
      redundancy = 0;
      localMaxmemory = 50;
      totalNumBuckets = 11;
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy).setLocalMaxMemory(localMaxmemory)
          .setTotalNumBuckets(totalNumBuckets).setColocatedWith(colocatedWith);
      if (isPartitionResolver) {
        paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
      }
      PartitionAttributes prAttr = paf.create();
      AttributesFactory attr = new AttributesFactory();
      attr.setPartitionAttributes(prAttr);
      assertNotNull(basicGetCache());
      try {
        Region r = basicGetCache().createRegion(regionName, attr.create());
        r.put("key", "value");
        assertEquals("value", (String) r.get("key"));
      } catch (Exception notExpected) {
        notExpected.printStackTrace();
        LogWriterUtils.getLogWriter()
            .info("Unexpected Exception Message : " + notExpected.getMessage());
        // Fixed typo in the failure message ("Unpexpected" -> "Unexpected").
        Assert.fail("Unexpected Exception", notExpected);
      }
    }
  });
}
Usage example of org.apache.geode.cache.PartitionAttributes from the Apache Geode project.
Taken from class PRColocationDUnitTest, method testColocatedPRWithAccessorOnDifferentNode1.
@Test
public void testColocatedPRWithAccessorOnDifferentNode1() throws Throwable {
  createCacheInAllVms();

  // Create the parent "customer" PR as a data store (localMaxMemory > 0) on dataStore1.
  dataStore1.invoke(new CacheSerializableRunnable("testColocatedPRwithAccessorOnDifferentNode") {
    @Override
    public void run2() {
      String partitionedRegionName = CustomerPartitionedRegionName;
      colocatedWith = null;
      // Autoboxing / cached wrappers instead of the deprecated wrapper constructors.
      isPartitionResolver = Boolean.FALSE;
      redundancy = 0;
      localMaxmemory = 50;
      totalNumBuckets = 11;
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy).setLocalMaxMemory(localMaxmemory)
          .setTotalNumBuckets(totalNumBuckets).setColocatedWith(colocatedWith);
      if (isPartitionResolver) {
        paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
      }
      PartitionAttributes prAttr = paf.create();
      AttributesFactory attr = new AttributesFactory();
      attr.setPartitionAttributes(prAttr);
      assertNotNull(basicGetCache());
      Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
      assertNotNull(pr);
      LogWriterUtils.getLogWriter().info("Partitioned Region " + partitionedRegionName
          + " created Successfully :" + pr.toString());
    }
  });

  // The colocated child PR is an accessor (localMaxMemory == 0) while its parent is a data
  // store on this node, so region creation must fail. Register the expected exception first.
  final IgnoredException ex = IgnoredException.addIgnoredException(
      "Colocated regions should have accessors at the same node", dataStore1);
  dataStore1.invoke(new CacheSerializableRunnable("Colocated PR with Accessor on different nodes") {
    @Override
    public void run2() {
      regionName = OrderPartitionedRegionName;
      colocatedWith = CustomerPartitionedRegionName;
      isPartitionResolver = Boolean.FALSE;
      localMaxmemory = 0; // accessor, unlike the data-store parent above
      redundancy = 0;
      totalNumBuckets = 11;
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy).setLocalMaxMemory(localMaxmemory)
          .setTotalNumBuckets(totalNumBuckets).setColocatedWith(colocatedWith);
      if (isPartitionResolver) {
        paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
      }
      PartitionAttributes prAttr = paf.create();
      AttributesFactory attr = new AttributesFactory();
      attr.setPartitionAttributes(prAttr);
      assertNotNull(basicGetCache());
      try {
        basicGetCache().createRegion(regionName, attr.create());
        fail("It should have failed with Exception: Colocated regions "
            + "should have accessors at the same node");
      } catch (Exception expected) {
        expected.printStackTrace();
        LogWriterUtils.getLogWriter().info("Expected Message : " + expected.getMessage());
        assertTrue(expected.getMessage()
            .startsWith("Colocated regions should have accessors at the same node"));
      }
    }
  });
  ex.remove();
}
Usage example of org.apache.geode.cache.PartitionAttributes from the Apache Geode project.
Taken from class PRColocationDUnitTest, method createSubPR.
/**
 * Creates a partitioned subregion named {@code partitionedRegionName} under a freshly created
 * replicated root region {@code "root" + partitionedRegionName} in this VM's cache.
 *
 * @param partitionedRegionName name of the subregion (root region name is derived from it)
 * @param redundancy number of redundant copies for the PR
 * @param localMaxMemory local max memory in MB; 0 makes this member an accessor, in which
 *        case sample data is also put into the region
 * @param totalNumBuckets total bucket count for the PR
 * @param colocatedWith full path of the region to colocate with, or null (must be a String)
 * @param isPartitionResolver whether to install a CustomerIDPartitionResolver on the PR
 */
public static void createSubPR(String partitionedRegionName, Integer redundancy,
    Integer localMaxMemory, Integer totalNumBuckets, Object colocatedWith,
    Boolean isPartitionResolver) {
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  paf.setRedundantCopies(redundancy).setLocalMaxMemory(localMaxMemory)
      .setTotalNumBuckets(totalNumBuckets).setColocatedWith((String) colocatedWith);
  if (isPartitionResolver) {
    paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
  }
  PartitionAttributes prAttr = paf.create();
  AttributesFactory attr = new AttributesFactory();
  assertNotNull(basicGetCache());
  // The root region is created BEFORE partition attributes are set, so it is not partitioned.
  Region root = basicGetCache().createRegion("root" + partitionedRegionName, attr.create());
  attr.setPartitionAttributes(prAttr);
  Region pr = root.createSubregion(partitionedRegionName, attr.create());
  assertNotNull(pr);
  LogWriterUtils.getLogWriter()
      .info("Partitioned sub region " + pr.getName() + " created Successfully :" + pr.toString());
  if (localMaxMemory == 0) {
    // Accessor member: seed data so remote data stores host the buckets.
    putInPartitionedRegion(pr);
  }
}
Usage example of org.apache.geode.cache.PartitionAttributes from the Apache Geode project.
Taken from class PersistentRVVRecoveryDUnitTest, method testConflictChecksDuringConcurrentDeltaGIIAndOtherOp.
/**
 * This test creates 2 VMs in a distributed system with a persistent PartitionedRegion and one VM
 * (VM1) puts an entry in region. Second VM (VM2) starts later and does a delta GII. During Delta
 * GII in VM2 a DESTROY operation happens in VM1 and gets propagated to VM2 concurrently with GII.
 * At this point if entry version is greater than the one received from GII then it must not get
 * applied. Which is Bug #45921.
 *
 */
@Test
public void testConflictChecksDuringConcurrentDeltaGIIAndOtherOp() {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);

  // vm0: create the PR and put the initial entry (entry version 1).
  vm0.invoke(new CacheSerializableRunnable("Create PR and put an entry") {
    @Override
    public void run2() throws CacheException {
      Cache cache = getCache();
      PartitionAttributes attrs =
          new PartitionAttributesFactory().setRedundantCopies(1).setTotalNumBuckets(1).create();
      AttributesFactory factory = new AttributesFactory();
      factory.setPartitionAttributes(attrs);
      RegionAttributes rAttrs = factory.create();
      Region region = cache.createRegionFactory(rAttrs).create("prRegion");
      region.put("testKey", "testValue");
      assertEquals(1, region.size());
    }
  });

  // vm1: create the PR, update the entry (entry version 2), then restart the cache and
  // region so the re-created member must perform a delta GII.
  vm1.invoke(new CacheSerializableRunnable("Create PR and put an entry") {
    @Override
    public void run2() throws CacheException {
      Cache cache = getCache();
      PartitionAttributes attrs =
          new PartitionAttributesFactory().setRedundantCopies(1).setTotalNumBuckets(1).create();
      AttributesFactory factory = new AttributesFactory();
      factory.setPartitionAttributes(attrs);
      RegionAttributes rAttrs = factory.create();
      Region region = cache.createRegionFactory(rAttrs).create("prRegion");
      region.put("testKey", "testValue2");
      cache.close();
      // Restart
      cache = getCache();
      region = cache.createRegionFactory(rAttrs).create("prRegion");
    }
  });

  // Do a DESTROY in vm0 when delta GII is in progress in vm1 (hopefully — not guaranteed).
  AsyncInvocation async = vm0.invokeAsync(new CacheSerializableRunnable("Destroy entry in region") {
    @Override
    public void run2() throws CacheException {
      Cache cache = getCache();
      Region region = cache.getRegion("prRegion");
      // Wait until vm1's update is visible before destroying (entry version 3).
      while (!region.get("testKey").equals("testValue2")) {
        Wait.pause(100);
      }
      region.destroy("testKey");
    }
  });
  try {
    async.join(3000);
  } catch (InterruptedException e) {
    // BUG FIX: the original created the AssertionError but never threw it, so a hung
    // destroy would let the test pass silently. Restore interrupt status and fail loudly.
    Thread.currentThread().interrupt();
    throw new AssertionError("VM1 entry destroy did not finish in 3000 ms", e);
  }

  // vm1: the destroyed entry must survive as a tombstone with entry version 3
  // (two puts plus the destroy); a stale GII version must not have overwritten it.
  vm1.invoke(new CacheSerializableRunnable("Verifying entry version in new node VM1") {
    @Override
    public void run2() throws CacheException {
      Cache cache = getCache();
      Region region = cache.getRegion("prRegion");
      Region.Entry entry = ((PartitionedRegion) region).getEntry("testKey", true);
      RegionEntry re = ((EntrySnapshot) entry).getRegionEntry();
      LogWriterUtils.getLogWriter().fine(
          "RegionEntry for testKey: " + re.getKey() + " " + re.getValueInVM((LocalRegion) region));
      assertTrue(re.getValueInVM((LocalRegion) region) instanceof Tombstone);
      VersionTag tag = re.getVersionStamp().asVersionTag();
      assertEquals(3, /* Two puts and a Destroy */
          tag.getEntryVersion());
    }
  });
  closeCache(vm0);
  closeCache(vm1);
}
End of aggregated usage examples.