
Example 36 with PartitionAttributes

Use of org.apache.geode.cache.PartitionAttributes in project geode by apache.

From class PRColocationDUnitTest, method testColocatedPRWithAccessorOnDifferentNode2.

@Test
public void testColocatedPRWithAccessorOnDifferentNode2() throws Throwable {
    createCacheInAllVms();
    dataStore1.invoke(new CacheSerializableRunnable("testColocatedPRWithAccessorOnDifferentNode2") {

        @Override
        public void run2() {
            String partitionedRegionName = CustomerPartitionedRegionName;
            colocatedWith = null;
            isPartitionResolver = new Boolean(false);
            redundancy = new Integer(0);
            localMaxmemory = new Integer(0);
            totalNumBuckets = new Integer(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
            assertNotNull(pr);
            LogWriterUtils.getLogWriter().info("Partitioned Region " + partitionedRegionName + " created Successfully :" + pr.toString());
        }
    });
    // add expected exception string
    final IgnoredException ex = IgnoredException.addIgnoredException("Colocated regions should have accessors at the same node", dataStore1);
    dataStore1.invoke(new CacheSerializableRunnable("Colocated PR with accessor on different nodes") {

        @Override
        public void run2() {
            regionName = OrderPartitionedRegionName;
            colocatedWith = CustomerPartitionedRegionName;
            isPartitionResolver = new Boolean(false);
            redundancy = new Integer(0);
            localMaxmemory = new Integer(50);
            totalNumBuckets = new Integer(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            try {
                basicGetCache().createRegion(regionName, attr.create());
                fail("It should have failed with Exception: Colocated regions " + "should have accessors at the same node");
            } catch (Exception Expected) {
                LogWriterUtils.getLogWriter().info("Expected Message : " + Expected.getMessage());
                assertTrue(Expected.getMessage().startsWith("Colocated regions should have accessors at the same node"));
            }
        }
    });
    ex.remove();
}
Also used: AttributesFactory(org.apache.geode.cache.AttributesFactory), PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory), PartitionAttributes(org.apache.geode.cache.PartitionAttributes), Region(org.apache.geode.cache.Region), PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion), CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable), IgnoredException(org.apache.geode.test.dunit.IgnoredException), IOException(java.io.IOException), CacheException(org.apache.geode.cache.CacheException), DistributedTest(org.apache.geode.test.junit.categories.DistributedTest), FlakyTest(org.apache.geode.test.junit.categories.FlakyTest), Test(org.junit.Test)
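
Note: Example 36 exercises the rule that a colocated child region must have the same accessor/data-store role as its parent on each member (the parent above is an accessor because localMaxMemory is 0, while the child is a data store, so creation fails). The snippet below is a minimal standalone sketch, not part of the test, of a configuration that satisfies that rule using the public RegionFactory API; the cache setup, region names, and bucket count are illustrative.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class ColocatedAccessorSketch {
    public static void main(String[] args) {
        Cache cache = new CacheFactory().create();
        // Parent region as an accessor on this member: localMaxMemory = 0 means no
        // bucket data is stored here, the member only routes operations elsewhere.
        Region<String, String> customer = cache
                .<String, String>createRegionFactory(RegionShortcut.PARTITION)
                .setPartitionAttributes(new PartitionAttributesFactory<String, String>()
                        .setLocalMaxMemory(0)
                        .setTotalNumBuckets(11)
                        .create())
                .create("CustomerRegion");
        // The colocated child must also be an accessor on this member; making it a
        // data store here would trigger the exception the test above expects.
        cache.<String, String>createRegionFactory(RegionShortcut.PARTITION)
                .setPartitionAttributes(new PartitionAttributesFactory<String, String>()
                        .setLocalMaxMemory(0)
                        .setTotalNumBuckets(11)
                        .setColocatedWith(customer.getFullPath())
                        .create())
                .create("OrderRegion");
        cache.close();
    }
}

RegionShortcut.PARTITION_PROXY expresses effectively the same accessor configuration as setting localMaxMemory to 0 by hand.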

Example 37 with PartitionAttributes

Use of org.apache.geode.cache.PartitionAttributes in project geode by apache.

From class PRColocationDUnitTest, method testColocatedPRWithPROnDifferentNode1.

@Test
public void testColocatedPRWithPROnDifferentNode1() throws Throwable {
    createCacheInAllVms();
    dataStore1.invoke(new CacheSerializableRunnable("TestColocatedPRWithPROnDifferentNode") {

        @Override
        public void run2() {
            String partitionedRegionName = CustomerPartitionedRegionName;
            colocatedWith = null;
            isPartitionResolver = new Boolean(false);
            redundancy = new Integer(0);
            localMaxmemory = new Integer(20);
            totalNumBuckets = new Integer(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
            assertNotNull(pr);
            LogWriterUtils.getLogWriter().info("Partitioned Region " + partitionedRegionName + " created Successfully :" + pr.toString());
        }
    });
    dataStore2.invoke(new CacheSerializableRunnable("testColocatedPRwithPROnDifferentNode") {

        @Override
        public void run2() {
            String partitionedRegionName = CustomerPartitionedRegionName;
            colocatedWith = null;
            isPartitionResolver = new Boolean(false);
            redundancy = new Integer(0);
            localMaxmemory = new Integer(20);
            totalNumBuckets = new Integer(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
            assertNotNull(pr);
            LogWriterUtils.getLogWriter().info("Partitioned Region " + partitionedRegionName + " created Successfully :" + pr.toString());
        }
    });
    // add expected exception string
    final IgnoredException ex = IgnoredException.addIgnoredException("Cannot create buckets", dataStore2);
    dataStore2.invoke(new CacheSerializableRunnable("Colocated PR with PR on different node") {

        @Override
        public void run2() {
            regionName = OrderPartitionedRegionName;
            colocatedWith = CustomerPartitionedRegionName;
            isPartitionResolver = new Boolean(false);
            redundancy = new Integer(0);
            localMaxmemory = new Integer(50);
            totalNumBuckets = new Integer(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            try {
                Region r = basicGetCache().createRegion(regionName, attr.create());
                // fail("It should have failed with Exception : Colocated regions
                // should have accessors at the same node");
                r.put("key", "value");
                fail("Failed because we did not receive the exception - : Cannot create buckets, " + "as colocated regions are not configured to be at the same nodes.");
            } catch (Exception Expected) {
                LogWriterUtils.getLogWriter().info("Expected Message : " + Expected.getMessage());
                assertTrue(Expected.getMessage().contains("Cannot create buckets, as " + "colocated regions are not configured to be at the same nodes."));
            }
        }
    });
    ex.remove();
    dataStore1.invoke(new CacheSerializableRunnable("Colocated PR with PR on different node") {

        @Override
        public void run2() {
            regionName = OrderPartitionedRegionName;
            colocatedWith = CustomerPartitionedRegionName;
            isPartitionResolver = new Boolean(false);
            redundancy = new Integer(0);
            localMaxmemory = new Integer(50);
            totalNumBuckets = new Integer(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            try {
                Region r = basicGetCache().createRegion(regionName, attr.create());
                r.put("key", "value");
                assertEquals("value", (String) r.get("key"));
            } catch (Exception NotExpected) {
                NotExpected.printStackTrace();
                LogWriterUtils.getLogWriter().info("Unexpected Exception Message : " + NotExpected.getMessage());
                Assert.fail("Unexpected Exception", NotExpected);
            }
        }
    });
}
Also used: AttributesFactory(org.apache.geode.cache.AttributesFactory), PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory), PartitionAttributes(org.apache.geode.cache.PartitionAttributes), Region(org.apache.geode.cache.Region), PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion), CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable), IgnoredException(org.apache.geode.test.dunit.IgnoredException), IOException(java.io.IOException), CacheException(org.apache.geode.cache.CacheException), DistributedTest(org.apache.geode.test.junit.categories.DistributedTest), FlakyTest(org.apache.geode.test.junit.categories.FlakyTest), Test(org.junit.Test)
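
Note: in Example 37 the mis-colocated setup only fails on the first put, because buckets are created lazily. The sketch below, assuming the public PartitionRegionHelper API, forces bucket assignment at setup time so such a misconfiguration surfaces before any data operations; orderRegion stands for an already-created colocated partitioned region and is illustrative.

import org.apache.geode.cache.Region;
import org.apache.geode.cache.partition.PartitionRegionHelper;

final class EagerBucketSketch {
    // Create and assign all buckets up front instead of waiting for the first put,
    // so a colocation problem like "Cannot create buckets ..." shows up during setup.
    static void createBucketsEagerly(Region<?, ?> orderRegion) {
        PartitionRegionHelper.assignBucketsToPartitions(orderRegion);
    }
}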

Example 38 with PartitionAttributes

Use of org.apache.geode.cache.PartitionAttributes in project geode by apache.

From class PRColocationDUnitTest, method testColocatedPRWithAccessorOnDifferentNode1.

@Test
public void testColocatedPRWithAccessorOnDifferentNode1() throws Throwable {
    createCacheInAllVms();
    dataStore1.invoke(new CacheSerializableRunnable("testColocatedPRwithAccessorOnDifferentNode") {

        @Override
        public void run2() {
            String partitionedRegionName = CustomerPartitionedRegionName;
            colocatedWith = null;
            isPartitionResolver = new Boolean(false);
            redundancy = new Integer(0);
            localMaxmemory = new Integer(50);
            totalNumBuckets = new Integer(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
            assertNotNull(pr);
            LogWriterUtils.getLogWriter().info("Partitioned Region " + partitionedRegionName + " created Successfully :" + pr.toString());
        }
    });
    // add expected exception string
    final IgnoredException ex = IgnoredException.addIgnoredException("Colocated regions should have accessors at the same node", dataStore1);
    dataStore1.invoke(new CacheSerializableRunnable("Colocated PR with Accessor on different nodes") {

        @Override
        public void run2() {
            regionName = OrderPartitionedRegionName;
            colocatedWith = CustomerPartitionedRegionName;
            isPartitionResolver = new Boolean(false);
            localMaxmemory = new Integer(0);
            redundancy = new Integer(0);
            totalNumBuckets = new Integer(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            try {
                basicGetCache().createRegion(regionName, attr.create());
                fail("It should have failed with Exception: Colocated regions " + "should have accessors at the same node");
            } catch (Exception Expected) {
                Expected.printStackTrace();
                LogWriterUtils.getLogWriter().info("Expected Message : " + Expected.getMessage());
                assertTrue(Expected.getMessage().startsWith("Colocated regions should have accessors at the same node"));
            }
        }
    });
    ex.remove();
}
Also used: AttributesFactory(org.apache.geode.cache.AttributesFactory), PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory), PartitionAttributes(org.apache.geode.cache.PartitionAttributes), Region(org.apache.geode.cache.Region), PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion), CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable), IgnoredException(org.apache.geode.test.dunit.IgnoredException), IOException(java.io.IOException), CacheException(org.apache.geode.cache.CacheException), DistributedTest(org.apache.geode.test.junit.categories.DistributedTest), FlakyTest(org.apache.geode.test.junit.categories.FlakyTest), Test(org.junit.Test)
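
Note: Examples 36-38 all build a PartitionAttributes instance and then assert on the resulting region only indirectly. The standalone sketch below simply reads the effective PartitionAttributes back from a created region; the region name and values are illustrative, not taken from the tests.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.PartitionAttributes;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class InspectPartitionAttributesSketch {
    public static void main(String[] args) {
        Cache cache = new CacheFactory().create();
        Region<String, String> region = cache
                .<String, String>createRegionFactory(RegionShortcut.PARTITION)
                .setPartitionAttributes(new PartitionAttributesFactory<String, String>()
                        .setRedundantCopies(0)
                        .setLocalMaxMemory(0) // accessor: no bucket storage on this member
                        .setTotalNumBuckets(11)
                        .create())
                .create("CustomerRegion");
        // Read the effective PartitionAttributes back from the region.
        PartitionAttributes<?, ?> pa = region.getAttributes().getPartitionAttributes();
        System.out.println("localMaxMemory=" + pa.getLocalMaxMemory()
                + " totalNumBuckets=" + pa.getTotalNumBuckets()
                + " redundantCopies=" + pa.getRedundantCopies()
                + " colocatedWith=" + pa.getColocatedWith());
        cache.close();
    }
}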

Example 39 with PartitionAttributes

Use of org.apache.geode.cache.PartitionAttributes in project geode by apache.

From class PRColocationDUnitTest, method createSubPR.

public static void createSubPR(String partitionedRegionName, Integer redundancy, Integer localMaxMemory, Integer totalNumBuckets, Object colocatedWith, Boolean isPartitionResolver) {
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxMemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith((String) colocatedWith);
    if (isPartitionResolver.booleanValue()) {
        paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
    }
    PartitionAttributes prAttr = paf.create();
    AttributesFactory attr = new AttributesFactory();
    assertNotNull(basicGetCache());
    Region root = basicGetCache().createRegion("root" + partitionedRegionName, attr.create());
    attr.setPartitionAttributes(prAttr);
    Region pr = root.createSubregion(partitionedRegionName, attr.create());
    assertNotNull(pr);
    LogWriterUtils.getLogWriter().info("Partitioned sub region " + pr.getName() + " created Successfully :" + pr.toString());
    if (localMaxMemory == 0) {
        putInPartitionedRegion(pr);
    }
}
Also used: AttributesFactory(org.apache.geode.cache.AttributesFactory), PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory), PartitionAttributes(org.apache.geode.cache.PartitionAttributes), Region(org.apache.geode.cache.Region), PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion)
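
Note: a hypothetical call site for the createSubPR helper above, written in the same dunit style as the other examples; the parameter values are illustrative, and dataStore1 and OrderPartitionedRegionName are the test class's own fixtures.

dataStore1.invoke(new CacheSerializableRunnable("create colocated sub PR") {

    @Override
    public void run2() {
        // (regionName, redundancy, localMaxMemory, totalNumBuckets, colocatedWith, isPartitionResolver)
        createSubPR(OrderPartitionedRegionName, new Integer(0), new Integer(50), new Integer(11), null, Boolean.FALSE);
    }
});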

Example 40 with PartitionAttributes

Use of org.apache.geode.cache.PartitionAttributes in project geode by apache.

From class PersistentRVVRecoveryDUnitTest, method testConflictChecksDuringConcurrentDeltaGIIAndOtherOp.

/**
   * This test creates 2 VMs in a distributed system with a persistent PartitionedRegion. One VM
   * (VM1) puts an entry in the region. The second VM (VM2) starts later and performs a delta GII.
   * During the delta GII in VM2, a DESTROY operation happens in VM1 and is propagated to VM2
   * concurrently with the GII. At that point, if the entry version is greater than the one
   * received via GII, the operation must not be applied. This is Bug #45921.
   *
   */
@Test
public void testConflictChecksDuringConcurrentDeltaGIIAndOtherOp() {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    vm0.invoke(new CacheSerializableRunnable("Create PR and put an entry") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            PartitionAttributes attrs = new PartitionAttributesFactory().setRedundantCopies(1).setTotalNumBuckets(1).create();
            AttributesFactory factory = new AttributesFactory();
            factory.setPartitionAttributes(attrs);
            RegionAttributes rAttrs = factory.create();
            Region region = cache.createRegionFactory(rAttrs).create("prRegion");
            region.put("testKey", "testValue");
            assertEquals(1, region.size());
        }
    });
    // Create a cache and region, do an update to change the version no. and
    // restart the cache and region.
    vm1.invoke(new CacheSerializableRunnable("Create PR and put an entry") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            PartitionAttributes attrs = new PartitionAttributesFactory().setRedundantCopies(1).setTotalNumBuckets(1).create();
            AttributesFactory factory = new AttributesFactory();
            factory.setPartitionAttributes(attrs);
            RegionAttributes rAttrs = factory.create();
            Region region = cache.createRegionFactory(rAttrs).create("prRegion");
            region.put("testKey", "testValue2");
            cache.close();
            // Restart
            cache = getCache();
            region = cache.createRegionFactory(rAttrs).create("prRegion");
        }
    });
    // Do a DESTROY in vm0 when delta GII is in progress in vm1 (Hopefully, Not
    // guaranteed).
    AsyncInvocation async = vm0.invokeAsync(new CacheSerializableRunnable("Destroy entry in region") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            Region region = cache.getRegion("prRegion");
            while (!region.get("testKey").equals("testValue2")) {
                Wait.pause(100);
            }
            region.destroy("testKey");
        }
    });
    try {
        async.join(3000);
    } catch (InterruptedException e) {
        throw new AssertionError("vm0 entry destroy did not finish in 3000 ms", e);
    }
    vm1.invoke(new CacheSerializableRunnable("Verifying entry version in new node VM1") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            Region region = cache.getRegion("prRegion");
            Region.Entry entry = ((PartitionedRegion) region).getEntry("testKey", true);
            RegionEntry re = ((EntrySnapshot) entry).getRegionEntry();
            LogWriterUtils.getLogWriter().fine("RegionEntry for testKey: " + re.getKey() + " " + re.getValueInVM((LocalRegion) region));
            assertTrue(re.getValueInVM((LocalRegion) region) instanceof Tombstone);
            VersionTag tag = re.getVersionStamp().asVersionTag();
            assertEquals(3, /* two puts and a destroy */ tag.getEntryVersion());
        }
    });
    closeCache(vm0);
    closeCache(vm1);
}
Also used: CacheException(org.apache.geode.cache.CacheException), RegionAttributes(org.apache.geode.cache.RegionAttributes), PartitionAttributes(org.apache.geode.cache.PartitionAttributes), PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory), AttributesFactory(org.apache.geode.cache.AttributesFactory), Region(org.apache.geode.cache.Region), PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion), LocalRegion(org.apache.geode.internal.cache.LocalRegion), DiskRegion(org.apache.geode.internal.cache.DiskRegion), RegionEntry(org.apache.geode.internal.cache.RegionEntry), NonTXEntry(org.apache.geode.internal.cache.LocalRegion.NonTXEntry), Tombstone(org.apache.geode.internal.cache.Token.Tombstone), VersionTag(org.apache.geode.internal.cache.versions.VersionTag), Cache(org.apache.geode.cache.Cache), CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable), Host(org.apache.geode.test.dunit.Host), VM(org.apache.geode.test.dunit.VM), AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation), DistributedTest(org.apache.geode.test.junit.categories.DistributedTest), Test(org.junit.Test)
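
Note: the polling loop in the destroy runnable above (Wait.pause(100) until the GII'd value is visible) can be expressed as a bounded await instead of fixed sleeps. A minimal sketch, assuming Awaitility is available on the test classpath; the timeout and key/value parameters are illustrative.

import java.util.concurrent.TimeUnit;

import org.apache.geode.cache.Region;
import org.awaitility.Awaitility;

final class WaitForValueSketch {
    // Block until the entry reaches the expected value, or fail after the timeout,
    // instead of looping on Wait.pause(100).
    static void awaitValue(Region<String, String> region, String key, String expected) {
        Awaitility.await()
                .atMost(30, TimeUnit.SECONDS)
                .until(() -> expected.equals(region.get(key)));
    }
}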

Aggregations

PartitionAttributes (org.apache.geode.cache.PartitionAttributes): 129 usages
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory): 117 usages
AttributesFactory (org.apache.geode.cache.AttributesFactory): 107 usages
Region (org.apache.geode.cache.Region): 82 usages
Test (org.junit.Test): 67 usages
Cache (org.apache.geode.cache.Cache): 66 usages
PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion): 61 usages
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable): 49 usages
Host (org.apache.geode.test.dunit.Host): 48 usages
VM (org.apache.geode.test.dunit.VM): 48 usages
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 47 usages
CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable): 38 usages
RegionAttributes (org.apache.geode.cache.RegionAttributes): 28 usages
CacheException (org.apache.geode.cache.CacheException): 26 usages
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 26 usages
SerializableCallable (org.apache.geode.test.dunit.SerializableCallable): 21 usages
BucketRegion (org.apache.geode.internal.cache.BucketRegion): 19 usages
FixedPartitionAttributes (org.apache.geode.cache.FixedPartitionAttributes): 18 usages
RebalanceResults (org.apache.geode.cache.control.RebalanceResults): 16 usages
HashSet (java.util.HashSet): 15 usages