Use of org.apache.geode.test.dunit.IgnoredException in project geode by apache.
Class PRColocationDUnitTest, method testColocatedPRWithDestroy. Destroying the parent region of a colocation chain before its children must fail with an IllegalStateException, so the expected message is registered as an IgnoredException on dataStore1 while that destroy is attempted and removed before the child and parent are destroyed in the correct order.
@Test
public void testColocatedPRWithDestroy() throws Throwable {
  createCacheInAllVms();
  redundancy = new Integer(0);
  localMaxmemory = new Integer(50);
  totalNumBuckets = new Integer(11);
  try {
    // Create Customer PartitionedRegion in All VMs
    regionName = CustomerPartitionedRegionName;
    colocatedWith = null;
    isPartitionResolver = new Boolean(false);
    attributeObjects = new Object[] {regionName, redundancy, localMaxmemory, totalNumBuckets,
        colocatedWith, isPartitionResolver};
    createPartitionedRegion(attributeObjects);
    // Create Order PartitionedRegion in All VMs
    regionName = OrderPartitionedRegionName;
    colocatedWith = CustomerPartitionedRegionName;
    isPartitionResolver = new Boolean(false);
    attributeObjects = new Object[] {regionName, redundancy, localMaxmemory, totalNumBuckets,
        colocatedWith, isPartitionResolver};
    createPartitionedRegion(attributeObjects);
  } catch (Exception Expected) {
    assertTrue(Expected instanceof IllegalStateException);
  }

  // Put the customer 1-10 in CustomerPartitionedRegion
  accessor.invoke(
      () -> PRColocationDUnitTest.putCustomerPartitionedRegion(CustomerPartitionedRegionName));
  // Put the order 1-10 for each Customer in OrderPartitionedRegion
  accessor.invoke(
      () -> PRColocationDUnitTest.putOrderPartitionedRegion(OrderPartitionedRegionName));

  // add expected exception string
  final String expectedExMessage =
      "colocation chain cannot be destroyed, " + "unless all its children";
  final IgnoredException ex = IgnoredException.addIgnoredException(expectedExMessage, dataStore1);
  dataStore1.invoke(new CacheSerializableRunnable("PR with destroy") {
    @Override
    public void run2() {
      Region partitionedregion =
          basicGetCache().getRegion(Region.SEPARATOR + CustomerPartitionedRegionName);
      try {
        partitionedregion.destroyRegion();
        fail("It should have thrown an Exception saying: " + expectedExMessage);
      } catch (IllegalStateException expected) {
        LogWriterUtils.getLogWriter().info("Got message: " + expected.getMessage());
        assertTrue(expected.getMessage().contains(expectedExMessage));
      }
    }
  });
  ex.remove();

  dataStore1.invoke(new CacheSerializableRunnable("PR with destroy") {
    @Override
    public void run2() {
      Region partitionedregion =
          basicGetCache().getRegion(Region.SEPARATOR + OrderPartitionedRegionName);
      try {
        partitionedregion.destroyRegion();
      } catch (Exception unexpected) {
        unexpected.printStackTrace();
        LogWriterUtils.getLogWriter().info("Unexpected Message: " + unexpected.getMessage());
        fail("Could not destroy the child region.");
      }
    }
  });

  dataStore1.invoke(new CacheSerializableRunnable("PR with destroy") {
    @Override
    public void run2() {
      Region partitionedregion =
          basicGetCache().getRegion(Region.SEPARATOR + CustomerPartitionedRegionName);
      try {
        partitionedregion.destroyRegion();
      } catch (Exception unexpected) {
        unexpected.printStackTrace();
        LogWriterUtils.getLogWriter().info("Unexpected Message: " + unexpected.getMessage());
        fail("Could not destroy the parent region.");
      }
    }
  });
}
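Stripped of the test scaffolding, the IgnoredException usage in the colocation tests boils down to three steps: register the expected error string for the VM whose log will record it, run the operation that is supposed to fail, then remove the suppression. A minimal sketch of that pattern follows; the VM handle vm and the helper destroyParentRegion() are illustrative names, not part of the test above.

// Minimal sketch of the single-VM suppression pattern (illustrative names).
final String expectedMessage = "colocation chain cannot be destroyed";
final IgnoredException suppressed =
    IgnoredException.addIgnoredException(expectedMessage, vm);
try {
  vm.invoke(() -> destroyParentRegion()); // operation expected to log the suppressed error
} finally {
  suppressed.remove(); // stop suppressing once the expected failure has been exercised
}

The tests above call remove() directly after the failing invoke; wrapping it in try/finally, as in this sketch, simply guarantees the cleanup even if an assertion inside the invoke fails.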
Use of org.apache.geode.test.dunit.IgnoredException in project geode by apache.
Class PRColocationDUnitTest, method testColocatedPRWithAccessorOnDifferentNode2. Creating a colocated child region as a datastore on a node where the parent is only an accessor (localMaxMemory = 0) must be rejected, so the message "Colocated regions should have accessors at the same node" is suppressed on dataStore1 around the failing createRegion call.
@Test
public void testColocatedPRWithAccessorOnDifferentNode2() throws Throwable {
  createCacheInAllVms();

  dataStore1.invoke(new CacheSerializableRunnable("testColocatedPRWithAccessorOnDifferentNode2") {
    @Override
    public void run2() {
      String partitionedRegionName = CustomerPartitionedRegionName;
      colocatedWith = null;
      isPartitionResolver = new Boolean(false);
      redundancy = new Integer(0);
      localMaxmemory = new Integer(0);
      totalNumBuckets = new Integer(11);
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue())
          .setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
      if (isPartitionResolver.booleanValue()) {
        paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
      }
      PartitionAttributes prAttr = paf.create();
      AttributesFactory attr = new AttributesFactory();
      attr.setPartitionAttributes(prAttr);
      assertNotNull(basicGetCache());
      Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
      assertNotNull(pr);
      LogWriterUtils.getLogWriter().info("Partitioned Region " + partitionedRegionName
          + " created Successfully :" + pr.toString());
    }
  });

  // add expected exception string
  final IgnoredException ex = IgnoredException
      .addIgnoredException("Colocated regions should have accessors at the same node", dataStore1);
  dataStore1.invoke(new CacheSerializableRunnable("Colocated PR with accessor on different nodes") {
    @Override
    public void run2() {
      regionName = OrderPartitionedRegionName;
      colocatedWith = CustomerPartitionedRegionName;
      isPartitionResolver = new Boolean(false);
      redundancy = new Integer(0);
      localMaxmemory = new Integer(50);
      totalNumBuckets = new Integer(11);
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue())
          .setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
      if (isPartitionResolver.booleanValue()) {
        paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
      }
      PartitionAttributes prAttr = paf.create();
      AttributesFactory attr = new AttributesFactory();
      attr.setPartitionAttributes(prAttr);
      assertNotNull(basicGetCache());
      try {
        basicGetCache().createRegion(regionName, attr.create());
        fail("It should have failed with Exception: Colocated regions "
            + "should have accessors at the same node");
      } catch (Exception Expected) {
        LogWriterUtils.getLogWriter().info("Expected Message : " + Expected.getMessage());
        assertTrue(Expected.getMessage()
            .startsWith("Colocated regions should have accessors at the same node"));
      }
    }
  });
  ex.remove();
}
Use of org.apache.geode.test.dunit.IgnoredException in project geode by apache.
Class PRColocationDUnitTest, method testColocatedPRWithPROnDifferentNode1. Both datastores host the Customer region, but the colocated Order region is at first created only on dataStore2, so a put there fails with "Cannot create buckets, as colocated regions are not configured to be at the same nodes."; that message is suppressed on dataStore2 until the check completes, after which dataStore1 also creates the Order region and the put succeeds.
@Test
public void testColocatedPRWithPROnDifferentNode1() throws Throwable {
  createCacheInAllVms();

  dataStore1.invoke(new CacheSerializableRunnable("TestColocatedPRWithPROnDifferentNode") {
    @Override
    public void run2() {
      String partitionedRegionName = CustomerPartitionedRegionName;
      colocatedWith = null;
      isPartitionResolver = new Boolean(false);
      redundancy = new Integer(0);
      localMaxmemory = new Integer(20);
      totalNumBuckets = new Integer(11);
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue())
          .setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
      if (isPartitionResolver.booleanValue()) {
        paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
      }
      PartitionAttributes prAttr = paf.create();
      AttributesFactory attr = new AttributesFactory();
      attr.setPartitionAttributes(prAttr);
      assertNotNull(basicGetCache());
      Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
      assertNotNull(pr);
      LogWriterUtils.getLogWriter().info("Partitioned Region " + partitionedRegionName
          + " created Successfully :" + pr.toString());
    }
  });

  dataStore2.invoke(new CacheSerializableRunnable("testColocatedPRwithPROnDifferentNode") {
    @Override
    public void run2() {
      String partitionedRegionName = CustomerPartitionedRegionName;
      colocatedWith = null;
      isPartitionResolver = new Boolean(false);
      redundancy = new Integer(0);
      localMaxmemory = new Integer(20);
      totalNumBuckets = new Integer(11);
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue())
          .setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
      if (isPartitionResolver.booleanValue()) {
        paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
      }
      PartitionAttributes prAttr = paf.create();
      AttributesFactory attr = new AttributesFactory();
      attr.setPartitionAttributes(prAttr);
      assertNotNull(basicGetCache());
      Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
      assertNotNull(pr);
      LogWriterUtils.getLogWriter().info("Partitioned Region " + partitionedRegionName
          + " created Successfully :" + pr.toString());
    }
  });

  // add expected exception string
  final IgnoredException ex =
      IgnoredException.addIgnoredException("Cannot create buckets", dataStore2);
  dataStore2.invoke(new CacheSerializableRunnable("Colocated PR with PR on different node") {
    @Override
    public void run2() {
      regionName = OrderPartitionedRegionName;
      colocatedWith = CustomerPartitionedRegionName;
      isPartitionResolver = new Boolean(false);
      redundancy = new Integer(0);
      localMaxmemory = new Integer(50);
      totalNumBuckets = new Integer(11);
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue())
          .setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
      if (isPartitionResolver.booleanValue()) {
        paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
      }
      PartitionAttributes prAttr = paf.create();
      AttributesFactory attr = new AttributesFactory();
      attr.setPartitionAttributes(prAttr);
      assertNotNull(basicGetCache());
      try {
        Region r = basicGetCache().createRegion(regionName, attr.create());
        // fail("It should have failed with Exception : Colocated regions
        // should have accessors at the same node");
        r.put("key", "value");
        fail("Failed because we did not receive the exception - : Cannot create buckets, "
            + "as colocated regions are not configured to be at the same nodes.");
      } catch (Exception Expected) {
        LogWriterUtils.getLogWriter().info("Expected Message : " + Expected.getMessage());
        assertTrue(Expected.getMessage().contains("Cannot create buckets, as "
            + "colocated regions are not configured to be at the same nodes."));
      }
    }
  });
  ex.remove();

  dataStore1.invoke(new CacheSerializableRunnable("Colocated PR with PR on different node") {
    @Override
    public void run2() {
      regionName = OrderPartitionedRegionName;
      colocatedWith = CustomerPartitionedRegionName;
      isPartitionResolver = new Boolean(false);
      redundancy = new Integer(0);
      localMaxmemory = new Integer(50);
      totalNumBuckets = new Integer(11);
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue())
          .setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
      if (isPartitionResolver.booleanValue()) {
        paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
      }
      PartitionAttributes prAttr = paf.create();
      AttributesFactory attr = new AttributesFactory();
      attr.setPartitionAttributes(prAttr);
      assertNotNull(basicGetCache());
      try {
        Region r = basicGetCache().createRegion(regionName, attr.create());
        r.put("key", "value");
        assertEquals("value", (String) r.get("key"));
      } catch (Exception NotExpected) {
        NotExpected.printStackTrace();
        LogWriterUtils.getLogWriter()
            .info("Unexpected Exception Message : " + NotExpected.getMessage());
        Assert.fail("Unexpected Exception", NotExpected);
      }
    }
  });
}
Use of org.apache.geode.test.dunit.IgnoredException in project geode by apache.
Class PRColocationDUnitTest, method testColocatedPRWithAccessorOnDifferentNode1. The counterpart of testColocatedPRWithAccessorOnDifferentNode2: the parent Customer region is a datastore on dataStore1, and creating the colocated Order region there as an accessor (localMaxMemory = 0) must be rejected with "Colocated regions should have accessors at the same node", which is suppressed for the duration of the attempt.
@Test
public void testColocatedPRWithAccessorOnDifferentNode1() throws Throwable {
  createCacheInAllVms();

  dataStore1.invoke(new CacheSerializableRunnable("testColocatedPRwithAccessorOnDifferentNode") {
    @Override
    public void run2() {
      String partitionedRegionName = CustomerPartitionedRegionName;
      colocatedWith = null;
      isPartitionResolver = new Boolean(false);
      redundancy = new Integer(0);
      localMaxmemory = new Integer(50);
      totalNumBuckets = new Integer(11);
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue())
          .setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
      if (isPartitionResolver.booleanValue()) {
        paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
      }
      PartitionAttributes prAttr = paf.create();
      AttributesFactory attr = new AttributesFactory();
      attr.setPartitionAttributes(prAttr);
      assertNotNull(basicGetCache());
      Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
      assertNotNull(pr);
      LogWriterUtils.getLogWriter().info("Partitioned Region " + partitionedRegionName
          + " created Successfully :" + pr.toString());
    }
  });

  // add expected exception string
  final IgnoredException ex = IgnoredException
      .addIgnoredException("Colocated regions should have accessors at the same node", dataStore1);
  dataStore1.invoke(new CacheSerializableRunnable("Colocated PR with Accessor on different nodes") {
    @Override
    public void run2() {
      regionName = OrderPartitionedRegionName;
      colocatedWith = CustomerPartitionedRegionName;
      isPartitionResolver = new Boolean(false);
      localMaxmemory = new Integer(0);
      redundancy = new Integer(0);
      totalNumBuckets = new Integer(11);
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue())
          .setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
      if (isPartitionResolver.booleanValue()) {
        paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
      }
      PartitionAttributes prAttr = paf.create();
      AttributesFactory attr = new AttributesFactory();
      attr.setPartitionAttributes(prAttr);
      assertNotNull(basicGetCache());
      try {
        basicGetCache().createRegion(regionName, attr.create());
        fail("It should have failed with Exception: Colocated regions "
            + "should have accessors at the same node");
      } catch (Exception Expected) {
        Expected.printStackTrace();
        LogWriterUtils.getLogWriter().info("Expected Message : " + Expected.getMessage());
        assertTrue(Expected.getMessage()
            .startsWith("Colocated regions should have accessors at the same node"));
      }
    }
  });
  ex.remove();
}
Use of org.apache.geode.test.dunit.IgnoredException in project geode by apache.
Class PersistentRecoveryOrderDUnitTest, method testSplitBrainWithNonPersistentRegion. vm1 writes and updates an entry in a persistent region and closes it, vm0 then creates a non-persistent region, and recreating the persistent region in vm1 is expected to fail; the IllegalStateException is suppressed by its simple class name on vm1 and the suppression is removed in a finally block, after which vm0 closes its region and vm1 recovers the entry.
@Test
public void testSplitBrainWithNonPersistentRegion() throws Exception {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);

  createPersistentRegion(vm1);
  putAnEntry(vm1);
  updateTheEntry(vm1);
  closeRegion(vm1);

  createNonPersistentRegion(vm0);

  IgnoredException e =
      IgnoredException.addIgnoredException(IllegalStateException.class.getSimpleName(), vm1);
  try {
    createPersistentRegion(vm1);
    fail("Should have received an IllegalState exception");
  } catch (Exception expected) {
    if (!(expected.getCause() instanceof IllegalStateException)) {
      throw expected;
    }
  } finally {
    e.remove();
  }

  closeRegion(vm0);
  createPersistentRegion(vm1);
  checkForEntry(vm1);
  checkForRecoveryStat(vm1, true);
}
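The split-brain test above shows a second shape of the same idiom: the suppression is keyed on the exception class's simple name rather than a message fragment, and remove() sits in a finally block. Distilled, with the VM handle vm and the helper recreatePersistentRegion(vm) as illustrative stand-ins for the test's own methods:

// Minimal sketch: suppress by exception class name and always clean up in finally.
IgnoredException suppressed =
    IgnoredException.addIgnoredException(IllegalStateException.class.getSimpleName(), vm);
try {
  recreatePersistentRegion(vm); // expected to fail with a wrapped IllegalStateException
  fail("expected an IllegalStateException");
} catch (Exception e) {
  if (!(e.getCause() instanceof IllegalStateException)) {
    throw e; // anything else is a real test failure
  }
} finally {
  suppressed.remove();
}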