
Example 66 with IgnoredException

use of org.apache.geode.test.dunit.IgnoredException in project geode by apache.

the class PRColocationDUnitTest method testColocatedPRWithDestroy.

@Test
public void testColocatedPRWithDestroy() throws Throwable {
    createCacheInAllVms();
    redundancy = new Integer(0);
    localMaxmemory = new Integer(50);
    totalNumBuckets = new Integer(11);
    try {
        // Create Customer PartitionedRegion in All VMs
        regionName = CustomerPartitionedRegionName;
        colocatedWith = null;
        isPartitionResolver = new Boolean(false);
        attributeObjects = new Object[] { regionName, redundancy, localMaxmemory, totalNumBuckets, colocatedWith, isPartitionResolver };
        createPartitionedRegion(attributeObjects);
        // Create Order PartitionedRegion in All VMs
        regionName = OrderPartitionedRegionName;
        colocatedWith = CustomerPartitionedRegionName;
        isPartitionResolver = new Boolean(false);
        attributeObjects = new Object[] { regionName, redundancy, localMaxmemory, totalNumBuckets, colocatedWith, isPartitionResolver };
        createPartitionedRegion(attributeObjects);
    } catch (Exception Expected) {
        assertTrue(Expected instanceof IllegalStateException);
    }
    // Put the customer 1-10 in CustomerPartitionedRegion
    accessor.invoke(() -> PRColocationDUnitTest.putCustomerPartitionedRegion(CustomerPartitionedRegionName));
    // Put the order 1-10 for each Customer in OrderPartitionedRegion
    accessor.invoke(() -> PRColocationDUnitTest.putOrderPartitionedRegion(OrderPartitionedRegionName));
    // add expected exception string
    final String expectedExMessage = "colocation chain cannot be destroyed, " + "unless all its children";
    final IgnoredException ex = IgnoredException.addIgnoredException(expectedExMessage, dataStore1);
    dataStore1.invoke(new CacheSerializableRunnable("PR with destroy") {

        @Override
        public void run2() {
            Region partitionedregion = basicGetCache().getRegion(Region.SEPARATOR + CustomerPartitionedRegionName);
            try {
                partitionedregion.destroyRegion();
                fail("It should have thrown an Exception saying: " + expectedExMessage);
            } catch (IllegalStateException expected) {
                LogWriterUtils.getLogWriter().info("Got message: " + expected.getMessage());
                assertTrue(expected.getMessage().contains(expectedExMessage));
            }
        }
    });
    ex.remove();
    dataStore1.invoke(new CacheSerializableRunnable("PR with destroy") {

        @Override
        public void run2() {
            Region partitionedregion = basicGetCache().getRegion(Region.SEPARATOR + OrderPartitionedRegionName);
            try {
                partitionedregion.destroyRegion();
            } catch (Exception unexpected) {
                unexpected.printStackTrace();
                LogWriterUtils.getLogWriter().info("Unexpected Message: " + unexpected.getMessage());
                fail("Could not destroy the child region.");
            }
        }
    });
    dataStore1.invoke(new CacheSerializableRunnable("PR with destroy") {

        @Override
        public void run2() {
            Region partitionedregion = basicGetCache().getRegion(Region.SEPARATOR + CustomerPartitionedRegionName);
            try {
                partitionedregion.destroyRegion();
            } catch (Exception unexpected) {
                unexpected.printStackTrace();
                LogWriterUtils.getLogWriter().info("Unexpected Message: " + unexpected.getMessage());
                fail("Could not destroy the parent region.");
            }
        }
    });
}
Also used : CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) IgnoredException(org.apache.geode.test.dunit.IgnoredException) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test)
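
All of these colocation tests share the same lifecycle: register the expected error text with IgnoredException before the operation that will log it, assert on the exception inside the remote VM, then remove the suppression. Below is a minimal sketch of that shape, with remove() moved into a finally block (the test above calls it inline after the invoke); the helper name is illustrative, and basicGetCache() is assumed to come from the enclosing DUnit test class, as it does in PRColocationDUnitTest.

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import org.apache.geode.cache.Region;
import org.apache.geode.cache30.CacheSerializableRunnable;
import org.apache.geode.test.dunit.IgnoredException;
import org.apache.geode.test.dunit.VM;

// Hypothetical helper, assumed to live inside a DUnit test class such as
// PRColocationDUnitTest so that basicGetCache() resolves in the remote VM.
private void destroyParentExpectingColocationError(VM dataStore, final String parentRegionName) {
    final String expectedFragment = "colocation chain cannot be destroyed";
    // Suppress the expected error in that one VM's logs so the DUnit log scanner
    // does not flag it as a test failure.
    IgnoredException suppressed = IgnoredException.addIgnoredException(expectedFragment, dataStore);
    try {
        dataStore.invoke(new CacheSerializableRunnable("destroy parent with live children") {
            @Override
            public void run2() {
                Region parent = basicGetCache().getRegion(Region.SEPARATOR + parentRegionName);
                try {
                    parent.destroyRegion();
                    fail("destroyRegion() should have thrown IllegalStateException");
                } catch (IllegalStateException expected) {
                    // The parent of a colocation chain cannot go away while children exist.
                    assertTrue(expected.getMessage().contains(expectedFragment));
                }
            }
        });
    } finally {
        // Stop suppressing once the expected failure has been verified.
        suppressed.remove();
    }
}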

Example 67 with IgnoredException

use of org.apache.geode.test.dunit.IgnoredException in project geode by apache.

the class PRColocationDUnitTest method testColocatedPRWithAccessorOnDifferentNode2.

@Test
public void testColocatedPRWithAccessorOnDifferentNode2() throws Throwable {
    createCacheInAllVms();
    dataStore1.invoke(new CacheSerializableRunnable("testColocatedPRWithAccessorOnDifferentNode2") {

        @Override
        public void run2() {
            String partitionedRegionName = CustomerPartitionedRegionName;
            colocatedWith = null;
            isPartitionResolver = new Boolean(false);
            redundancy = new Integer(0);
            localMaxmemory = new Integer(0);
            totalNumBuckets = new Integer(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
            assertNotNull(pr);
            LogWriterUtils.getLogWriter().info("Partitioned Region " + partitionedRegionName + " created Successfully :" + pr.toString());
        }
    });
    // add expected exception string
    final IgnoredException ex = IgnoredException.addIgnoredException("Colocated regions should have accessors at the same node", dataStore1);
    dataStore1.invoke(new CacheSerializableRunnable("Colocated PR with accessor on different nodes") {

        @Override
        public void run2() {
            regionName = OrderPartitionedRegionName;
            colocatedWith = CustomerPartitionedRegionName;
            isPartitionResolver = new Boolean(false);
            redundancy = new Integer(0);
            localMaxmemory = new Integer(50);
            totalNumBuckets = new Integer(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            try {
                basicGetCache().createRegion(regionName, attr.create());
                fail("It should have failed with Exception: Colocated regions " + "should have accessors at the same node");
            } catch (Exception Expected) {
                LogWriterUtils.getLogWriter().info("Expected Message : " + Expected.getMessage());
                assertTrue(Expected.getMessage().startsWith("Colocated regions should have accessors at the same node"));
            }
        }
    });
    ex.remove();
}
Also used : PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) IgnoredException(org.apache.geode.test.dunit.IgnoredException) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test)
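
The two inline attribute blocks above differ only in localMaxMemory, and that is the whole point of the test: setLocalMaxMemory(0) makes the member an accessor that hosts no buckets, so on dataStore1 the Customer parent is an accessor while the colocated Order child would be a data store, which is exactly the mismatch createRegion() rejects. A sketch of the two attribute shapes, using only the factories the test already imports; the class and method names are illustrative.

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.RegionAttributes;

// Illustrative helper class, not part of the test.
class ColocatedAttributesSketch {

    // Data-store attributes: localMaxMemory > 0, so this member hosts buckets.
    static RegionAttributes dataStoreAttributes(int totalBuckets, String colocatedWith) {
        PartitionAttributesFactory paf = new PartitionAttributesFactory();
        paf.setRedundantCopies(0).setLocalMaxMemory(50).setTotalNumBuckets(totalBuckets)
            .setColocatedWith(colocatedWith);
        AttributesFactory attr = new AttributesFactory();
        attr.setPartitionAttributes(paf.create());
        return attr.create();
    }

    // Accessor attributes: localMaxMemory == 0, so this member hosts no buckets.
    // Colocating a data-store child with an accessor parent on the same member fails.
    static RegionAttributes accessorAttributes(int totalBuckets, String colocatedWith) {
        PartitionAttributesFactory paf = new PartitionAttributesFactory();
        paf.setRedundantCopies(0).setLocalMaxMemory(0).setTotalNumBuckets(totalBuckets)
            .setColocatedWith(colocatedWith);
        AttributesFactory attr = new AttributesFactory();
        attr.setPartitionAttributes(paf.create());
        return attr.create();
    }
}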

Example 68 with IgnoredException

use of org.apache.geode.test.dunit.IgnoredException in project geode by apache.

the class PRColocationDUnitTest method testColocatedPRWithPROnDifferentNode1.

@Test
public void testColocatedPRWithPROnDifferentNode1() throws Throwable {
    createCacheInAllVms();
    dataStore1.invoke(new CacheSerializableRunnable("TestColocatedPRWithPROnDifferentNode") {

        @Override
        public void run2() {
            String partitionedRegionName = CustomerPartitionedRegionName;
            colocatedWith = null;
            isPartitionResolver = new Boolean(false);
            redundancy = new Integer(0);
            localMaxmemory = new Integer(20);
            totalNumBuckets = new Integer(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
            assertNotNull(pr);
            LogWriterUtils.getLogWriter().info("Partitioned Region " + partitionedRegionName + " created Successfully :" + pr.toString());
        }
    });
    dataStore2.invoke(new CacheSerializableRunnable("testColocatedPRwithPROnDifferentNode") {

        @Override
        public void run2() {
            String partitionedRegionName = CustomerPartitionedRegionName;
            colocatedWith = null;
            isPartitionResolver = new Boolean(false);
            redundancy = new Integer(0);
            localMaxmemory = new Integer(20);
            totalNumBuckets = new Integer(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
            assertNotNull(pr);
            LogWriterUtils.getLogWriter().info("Partitioned Region " + partitionedRegionName + " created Successfully :" + pr.toString());
        }
    });
    // add expected exception string
    final IgnoredException ex = IgnoredException.addIgnoredException("Cannot create buckets", dataStore2);
    dataStore2.invoke(new CacheSerializableRunnable("Colocated PR with PR on different node") {

        @Override
        public void run2() {
            regionName = OrderPartitionedRegionName;
            colocatedWith = CustomerPartitionedRegionName;
            isPartitionResolver = new Boolean(false);
            redundancy = new Integer(0);
            localMaxmemory = new Integer(50);
            totalNumBuckets = new Integer(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            try {
                Region r = basicGetCache().createRegion(regionName, attr.create());
                // fail("It should have failed with Exception : Colocated regions
                // should have accessors at the same node");
                r.put("key", "value");
                fail("Failed because we did not receive the exception - : Cannot create buckets, " + "as colocated regions are not configured to be at the same nodes.");
            } catch (Exception Expected) {
                LogWriterUtils.getLogWriter().info("Expected Message : " + Expected.getMessage());
                assertTrue(Expected.getMessage().contains("Cannot create buckets, as " + "colocated regions are not configured to be at the same nodes."));
            }
        }
    });
    ex.remove();
    dataStore1.invoke(new CacheSerializableRunnable("Colocated PR with PR on different node") {

        @Override
        public void run2() {
            regionName = OrderPartitionedRegionName;
            colocatedWith = CustomerPartitionedRegionName;
            isPartitionResolver = new Boolean(false);
            redundancy = new Integer(0);
            localMaxmemory = new Integer(50);
            totalNumBuckets = new Integer(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            try {
                Region r = basicGetCache().createRegion(regionName, attr.create());
                r.put("key", "value");
                assertEquals("value", (String) r.get("key"));
            } catch (Exception NotExpected) {
                NotExpected.printStackTrace();
                LogWriterUtils.getLogWriter().info("Unexpected Exception Message : " + NotExpected.getMessage());
                Assert.fail("Unpexpected Exception", NotExpected);
            }
        }
    });
}
Also used : PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) IgnoredException(org.apache.geode.test.dunit.IgnoredException) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test)
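
Condensed, the point of this example is where the colocation check fires: when the colocated parent has no bucket on the member, createRegion() for the child still succeeds and the rejection is deferred to the first bucket creation, i.e. the first put. A sketch of that in-VM check, assumed to run inside a run2() of the same test, so basicGetCache(), the region-name constant, the attr factory and the JUnit asserts all come from the surrounding code shown above.

// Region creation itself does not validate bucket placement for colocated PRs ...
Region order = basicGetCache().createRegion(OrderPartitionedRegionName, attr.create());
try {
    // ... the first put forces bucket creation, and that is where colocation is enforced.
    order.put("key", "value");
    fail("Expected bucket creation to be rejected on this member");
} catch (Exception expected) {
    assertTrue(expected.getMessage()
        .contains("Cannot create buckets, as colocated regions are not configured to be at the same nodes."));
}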

Example 69 with IgnoredException

use of org.apache.geode.test.dunit.IgnoredException in project geode by apache.

the class PRColocationDUnitTest method testColocatedPRWithAccessorOnDifferentNode1.

@Test
public void testColocatedPRWithAccessorOnDifferentNode1() throws Throwable {
    createCacheInAllVms();
    dataStore1.invoke(new CacheSerializableRunnable("testColocatedPRwithAccessorOnDifferentNode") {

        @Override
        public void run2() {
            String partitionedRegionName = CustomerPartitionedRegionName;
            colocatedWith = null;
            isPartitionResolver = new Boolean(false);
            redundancy = new Integer(0);
            localMaxmemory = new Integer(50);
            totalNumBuckets = new Integer(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
            assertNotNull(pr);
            LogWriterUtils.getLogWriter().info("Partitioned Region " + partitionedRegionName + " created Successfully :" + pr.toString());
        }
    });
    // add expected exception string
    final IgnoredException ex = IgnoredException.addIgnoredException("Colocated regions should have accessors at the same node", dataStore1);
    dataStore1.invoke(new CacheSerializableRunnable("Colocated PR with Accessor on different nodes") {

        @Override
        public void run2() {
            regionName = OrderPartitionedRegionName;
            colocatedWith = CustomerPartitionedRegionName;
            isPartitionResolver = new Boolean(false);
            localMaxmemory = new Integer(0);
            redundancy = new Integer(0);
            totalNumBuckets = new Integer(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            try {
                basicGetCache().createRegion(regionName, attr.create());
                fail("It should have failed with Exception: Colocated regions " + "should have accessors at the same node");
            } catch (Exception Expected) {
                Expected.printStackTrace();
                LogWriterUtils.getLogWriter().info("Expected Message : " + Expected.getMessage());
                assertTrue(Expected.getMessage().startsWith("Colocated regions should have accessors at the same node"));
            }
        }
    });
    ex.remove();
}
Also used : PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) IgnoredException(org.apache.geode.test.dunit.IgnoredException) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test)
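
Examples 67 and 69 register the same error string; the only thing that varies is scope and cleanup. A short sketch of the registration forms as they would appear inside a test method; dataStore1 is the test's VM field, and the no-argument overload plus the bulk tearDown helper are the standard IgnoredException variants, shown here on the assumption that this version of the DUnit framework provides both.

import org.apache.geode.test.dunit.IgnoredException;

// Suppress the message only in the VM where it is expected to be logged ...
IgnoredException onOneVm = IgnoredException.addIgnoredException(
    "Colocated regions should have accessors at the same node", dataStore1);

// ... or in every VM of the test when the origin is not known in advance.
IgnoredException everywhere = IgnoredException.addIgnoredException(
    "Colocated regions should have accessors at the same node");

// Remove each registration once the expected failure has been asserted ...
onOneVm.remove();
everywhere.remove();

// ... or sweep all remaining registrations, typically from tearDown.
IgnoredException.removeAllExpectedExceptions();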

Example 70 with IgnoredException

use of org.apache.geode.test.dunit.IgnoredException in project geode by apache.

the class PersistentRecoveryOrderDUnitTest method testSplitBrainWithNonPersistentRegion.

@Test
public void testSplitBrainWithNonPersistentRegion() throws Exception {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    createPersistentRegion(vm1);
    putAnEntry(vm1);
    updateTheEntry(vm1);
    closeRegion(vm1);
    createNonPersistentRegion(vm0);
    IgnoredException e = IgnoredException.addIgnoredException(IllegalStateException.class.getSimpleName(), vm1);
    try {
        createPersistentRegion(vm1);
        fail("Should have received an IllegalState exception");
    } catch (Exception expected) {
        if (!(expected.getCause() instanceof IllegalStateException)) {
            throw expected;
        }
    } finally {
        e.remove();
    }
    closeRegion(vm0);
    createPersistentRegion(vm1);
    checkForEntry(vm1);
    checkForRecoveryStat(vm1, true);
}
Also used : VM(org.apache.geode.test.dunit.VM) IgnoredException(org.apache.geode.test.dunit.IgnoredException) Host(org.apache.geode.test.dunit.Host) RevokedPersistentDataException(org.apache.geode.cache.persistence.RevokedPersistentDataException) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) DistributedSystemDisconnectedException(org.apache.geode.distributed.DistributedSystemDisconnectedException) AdminException(org.apache.geode.admin.AdminException) ConflictingPersistentDataException(org.apache.geode.cache.persistence.ConflictingPersistentDataException) LockServiceDestroyedException(org.apache.geode.distributed.LockServiceDestroyedException) CacheClosedException(org.apache.geode.cache.CacheClosedException) PersistentReplicatesOfflineException(org.apache.geode.cache.persistence.PersistentReplicatesOfflineException) IOException(java.io.IOException) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test)
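
Two details distinguish Example 70 from the colocation tests above: the suppression is keyed on the exception class simple name rather than on message text, and the assertion looks at getCause(), because an exception raised inside vm1 reaches the controller JVM wrapped by the DUnit invocation machinery (the exact wrapper depends on what the createPersistentRegion() helper does internally). A condensed sketch of that shape, with the suppression removed in finally exactly as the test does.

// Register by class name: any logged error containing "IllegalStateException" is ignored.
IgnoredException suppressed =
    IgnoredException.addIgnoredException(IllegalStateException.class.getSimpleName(), vm1);
try {
    createPersistentRegion(vm1); // test helper; expected to fail while vm0 hosts the non-persistent region
    fail("Should have received an IllegalStateException");
} catch (Exception wrapped) {
    // The remote IllegalStateException arrives as the cause of the exception thrown
    // on the controller side; anything else is rethrown as a genuine failure.
    if (!(wrapped.getCause() instanceof IllegalStateException)) {
        throw wrapped;
    }
} finally {
    suppressed.remove();
}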

Aggregations

IgnoredException (org.apache.geode.test.dunit.IgnoredException): 142
Test (org.junit.Test): 89
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 71
Region (org.apache.geode.cache.Region): 46
FlakyTest (org.apache.geode.test.junit.categories.FlakyTest): 46
VM (org.apache.geode.test.dunit.VM): 43
Host (org.apache.geode.test.dunit.Host): 38
PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion): 34
AttributesFactory (org.apache.geode.cache.AttributesFactory): 30
ForceReattemptException (org.apache.geode.internal.cache.ForceReattemptException): 28
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory): 23
IOException (java.io.IOException): 21
CacheClosedException (org.apache.geode.cache.CacheClosedException): 21
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 20
PartitionOfflineException (org.apache.geode.cache.persistence.PartitionOfflineException): 16
RMIException (org.apache.geode.test.dunit.RMIException): 15
GatewaySender (org.apache.geode.cache.wan.GatewaySender): 14
BucketRegion (org.apache.geode.internal.cache.BucketRegion): 14
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable): 14
CacheXmlException (org.apache.geode.cache.CacheXmlException): 12