Example 46 with RegionAttributes

Use of org.apache.geode.cache.RegionAttributes in project geode by apache.

The class DeltaPropagationDUnitTest method createServerCache.

public static Integer createServerCache(String ePolicy, Integer cap, Integer listenerCode, Boolean conflate, Compressor compressor) throws Exception {
    ConnectionTable.threadWantsSharedResources();
    new DeltaPropagationDUnitTest().createCache(new Properties());
    AttributesFactory factory = new AttributesFactory();
    factory.setEnableSubscriptionConflation(conflate);
    if (listenerCode.intValue() != 0) {
        factory.addCacheListener(getCacheListener(listenerCode));
    }
    if (compressor != null) {
        factory.setCompressor(compressor);
    }
    if (listenerCode.intValue() == C2S2S_SERVER_LISTENER) {
        factory.setScope(Scope.DISTRIBUTED_NO_ACK);
        factory.setDataPolicy(DataPolicy.NORMAL);
        factory.setConcurrencyChecksEnabled(false);
        RegionAttributes attrs = factory.create();
        Region r = cache.createRegion(regionName, attrs);
        logger = cache.getLogger();
        r.create(DELTA_KEY, deltaPut[0]);
    } else {
        factory.setScope(Scope.DISTRIBUTED_ACK);
        factory.setDataPolicy(DataPolicy.REPLICATE);
        factory.setConcurrencyChecksEnabled(false);
        RegionAttributes attrs = factory.create();
        cache.createRegion(regionName, attrs);
        logger = cache.getLogger();
    }
    int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
    CacheServer server1 = cache.addCacheServer();
    server1.setPort(port);
    server1.setNotifyBySubscription(true);
    if (ePolicy != null) {
        File overflowDirectory = new File("bsi_overflow_" + port);
        overflowDirectory.mkdir();
        DiskStoreFactory dsf = cache.createDiskStoreFactory();
        File[] dirs1 = new File[] { overflowDirectory };
        server1.getClientSubscriptionConfig().setEvictionPolicy(ePolicy);
        server1.getClientSubscriptionConfig().setCapacity(cap.intValue());
        // Specify the disk store that backs this server's client subscription queue overflow
        server1.getClientSubscriptionConfig().setDiskStoreName(dsf.setDiskDirs(dirs1).create("bsi").getName());
    }
    server1.start();
    return server1.getPort();
}
Also used : AttributesFactory(org.apache.geode.cache.AttributesFactory) RegionAttributes(org.apache.geode.cache.RegionAttributes) Region(org.apache.geode.cache.Region) CacheServer(org.apache.geode.cache.server.CacheServer) ConfigurationProperties(org.apache.geode.distributed.ConfigurationProperties) Properties(java.util.Properties) File(java.io.File) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory)
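
For comparison, here is a minimal sketch (not taken from the test above) of a similar server-side setup written against the non-deprecated RegionFactory/RegionShortcut API instead of AttributesFactory/RegionAttributes. The region name, the conflation setting, and the use of port 0 (OS-assigned) are illustrative placeholders.

import java.io.IOException;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.server.CacheServer;

public class ServerRegionSketch {

    public static void main(String[] args) throws IOException {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        // REPLICATE corresponds to Scope.DISTRIBUTED_ACK + DataPolicy.REPLICATE in the test.
        Region<String, Object> region = cache
                .<String, Object>createRegionFactory(RegionShortcut.REPLICATE)
                .setEnableSubscriptionConflation(true)
                .create("exampleRegion");
        CacheServer server = cache.addCacheServer();
        // Port 0 lets the server pick an unused port, similar to AvailablePort in the test.
        server.setPort(0);
        server.start();
        System.out.println("Serving " + region.getFullPath() + " on port " + server.getPort());
        cache.close();
    }
}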

Example 47 with RegionAttributes

Use of org.apache.geode.cache.RegionAttributes in project geode by apache.

The class FixedPRSinglehopDUnitTest method createRegionsInClientCache.

private static void createRegionsInClientCache(String poolName) {
    AttributesFactory factory = new AttributesFactory();
    factory.setPoolName(poolName);
    factory.setDataPolicy(DataPolicy.EMPTY);
    RegionAttributes attrs = factory.create();
    region = cache.createRegion(PR_NAME, attrs);
    assertNotNull(region);
    LogWriterUtils.getLogWriter().info("Distributed Region " + PR_NAME + " created Successfully :" + region.toString());
}
Also used : AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) RegionAttributes(org.apache.geode.cache.RegionAttributes)
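
The client-side AttributesFactory/pool approach above can also be expressed with the non-deprecated ClientCache API. A minimal sketch, not from the test: the locator host/port and region name are placeholders, and ClientRegionShortcut.PROXY plays the role of DataPolicy.EMPTY (no local storage, all operations forwarded to the servers).

import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;

public class ClientRegionSketch {

    public static void main(String[] args) {
        // Placeholder locator address; adjust for the target cluster.
        ClientCache clientCache = new ClientCacheFactory()
                .addPoolLocator("localhost", 10334)
                .create();
        Region<String, Object> region = clientCache
                .<String, Object>createClientRegionFactory(ClientRegionShortcut.PROXY)
                .create("examplePartitionedRegion");
        System.out.println("Created client region " + region.getFullPath());
        clientCache.close();
    }
}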

Example 48 with RegionAttributes

Use of org.apache.geode.cache.RegionAttributes in project geode by apache.

The class PartitionedRegionCreationJUnitTest method test000PartitionedRegionCreate.

/*
   * 1) Create 10 threads; each thread tries to create a PartitionedRegion. A total of 5
   * partitioned regions are created, so 5 of the threads should get a RegionExistsException.
   * 2) Tests for PR scope = GLOBAL and PR scope = LOCAL.
   * 3) Test for redundancy < 0.
   * 4) Test for redundancy > 3.
   * 5) Test for localMaxMemory < 0.
   */
@Test
public void test000PartitionedRegionCreate() {
    createMultiplePartitionedRegions();
    verifyCreateResults();
    if (logger.fineEnabled()) {
        logger.fine(" PartitionedRegionCreationTest-testpartionedRegionCreate() Successfully Complete ..  ");
    }
    final String regionname = "testPartionedRegionCreate";
    int localMaxMemory = 0;
    PartitionedRegion pr = null;
    // Test vanilla creation of a Partitioned Region w/o Scope
    try {
        AttributesFactory af = new AttributesFactory();
        af.setDataPolicy(DataPolicy.PARTITION);
        RegionAttributes ra = af.create();
        Cache cache = PartitionedRegionTestHelper.createCache();
        pr = (PartitionedRegion) cache.createRegion(regionname, ra);
    } finally {
        if (pr != null) {
            pr.destroyRegion();
        }
    }
    // Assert that setting any scope throws IllegalStateException
    final Scope[] scopes = { Scope.LOCAL, Scope.DISTRIBUTED_ACK, Scope.DISTRIBUTED_NO_ACK, Scope.GLOBAL };
    for (int i = 0; i < scopes.length; i++) {
        try {
            AttributesFactory af = new AttributesFactory();
            af.setDataPolicy(DataPolicy.PARTITION);
            af.setScope(scopes[i]);
            RegionAttributes ra = af.create();
            Cache cache = PartitionedRegionTestHelper.createCache();
            pr = (PartitionedRegion) cache.createRegion(regionname, ra);
            fail("testpartionedRegionCreate() Expected IllegalStateException not thrown for Scope " + scopes[i]);
        } catch (IllegalStateException expected) {
        } finally {
            if (pr != null && !pr.isDestroyed()) {
                pr.destroyRegion();
            }
        }
    }
    // test for redundancy > 3
    int redundancy = 10;
    try {
        pr = (PartitionedRegion) PartitionedRegionTestHelper.createPartitionedRegion(regionname, String.valueOf(localMaxMemory), redundancy);
        fail("testpartionedRegionCreate() Expected IllegalStateException not thrown for redundancy > 3 ");
    } catch (IllegalStateException illex) {
        if (logger.fineEnabled()) {
            logger.fine("testpartionedRegionCreate() Got a correct exception-IllegalStateException for  redundancy > 3 ");
        }
    }
    // test for redundancy < 0
    if (pr != null && !pr.isDestroyed())
        pr.destroyRegion();
    redundancy = -5;
    try {
        pr = (PartitionedRegion) PartitionedRegionTestHelper.createPartitionedRegion(regionname, String.valueOf(200), redundancy);
        fail("testpartionedRegionCreate() Expected IllegalStateException not thrown for redundancy < 0 ");
    } catch (IllegalStateException illex) {
        if (logger.fineEnabled()) {
            logger.fine("testpartionedRegionCreate() Got a correct exception-IllegalStateException for  redundancy < 0 ");
        }
    }
// test for localMaxMemory < 0
/*
     * if (pr!= null && !pr.isDestroyed()) pr.destroyRegion(); ; localMaxMemory = -5; try { pr =
     * (PartitionedRegion)PartitionedRegionTestHelper .createPartitionedRegion(regionname,
     * String.valueOf(localMaxMemory), 2, Scope.DISTRIBUTED_ACK);
     * fail("testpartionedRegionCreate() Expected IllegalStateException not thrown for localMaxMemory < 0 "
     * ); } catch (IllegalStateException illex) { if (logger.fineEnabled()) { logger
     * .fine("testpartionedRegionCreate() Got a correct exception-IllegalStateException for  localMaxMemory < 0  "
     * ); } }
     */
}
Also used : AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) Scope(org.apache.geode.cache.Scope) RegionAttributes(org.apache.geode.cache.RegionAttributes) Cache(org.apache.geode.cache.Cache) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
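
The constraints this test exercises (data policy PARTITION, no explicit scope, redundancy limited to 0 through 3) come from the PartitionAttributes carried inside the RegionAttributes. Below is a minimal standalone sketch of building such attributes; the region name and numeric values are illustrative and not taken from the test.

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DataPolicy;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionAttributes;

public class PartitionedRegionAttributesSketch {

    public static void main(String[] args) {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        AttributesFactory factory = new AttributesFactory();
        factory.setDataPolicy(DataPolicy.PARTITION);
        // Do not set a scope: partitioned regions reject any explicit Scope,
        // which is exactly what the loop over scopes[] in the test asserts.
        factory.setPartitionAttributes(new PartitionAttributesFactory()
                .setRedundantCopies(1)     // valid values are 0..3
                .setLocalMaxMemory(50)     // MB; 0 would make this member an accessor
                .create());
        RegionAttributes attrs = factory.create();
        Region region = cache.createRegion("examplePartitionedRegion", attrs);
        System.out.println("Created " + region.getFullPath());
        cache.close();
    }
}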

Example 49 with RegionAttributes

Use of org.apache.geode.cache.RegionAttributes in project geode by apache.

The class PartitionedRegionCreationJUnitTest method test001PersistentPartitionedRegionCreate.

@Test
public void test001PersistentPartitionedRegionCreate() {
    final String regionname = "testPersistentPartionedRegionCreate";
    PartitionedRegion pr = null;
    // Test vanilla creation of a Partitioned Region w/o Scope
    try {
        AttributesFactory af = new AttributesFactory();
        af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
        RegionAttributes ra = af.create();
        Cache cache = PartitionedRegionTestHelper.createCache();
        pr = (PartitionedRegion) cache.createRegion(regionname, ra);
    } finally {
        if (pr != null) {
            pr.destroyRegion();
        }
    }
    // Assert that an accessor (localMaxMem == 0) can't be persistent
    try {
        AttributesFactory af = new AttributesFactory();
        af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
        af.setPartitionAttributes(new PartitionAttributesFactory().setLocalMaxMemory(0).create());
        RegionAttributes ra = af.create();
        Cache cache = PartitionedRegionTestHelper.createCache();
        pr = (PartitionedRegion) cache.createRegion(regionname, ra);
        fail("testpartionedRegionCreate() Expected IllegalStateException not thrown");
    } catch (IllegalStateException expected) {
        assertEquals("Persistence is not allowed when local-max-memory is zero.", expected.getMessage());
    }
    // Assert that region creation fails when a diskStoreName is configured but the disk store has not been created.
    try {
        AttributesFactory af = new AttributesFactory();
        af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
        af.setDiskStoreName("nonexistentDiskStore");
        RegionAttributes ra = af.create();
        Cache cache = PartitionedRegionTestHelper.createCache();
        pr = (PartitionedRegion) cache.createRegion(regionname, ra);
        fail("testpartionedRegionCreate() Expected IllegalStateException not thrown");
    } catch (RuntimeException expected) {
        assertTrue(expected.getMessage().contains(LocalizedStrings.CacheCreation_DISKSTORE_NOTFOUND_0.toLocalizedString("nonexistentDiskStore")));
    }
    // Assert that you can't have a diskStoreName unless you are persistent or overflow.
    try {
        Cache cache = PartitionedRegionTestHelper.createCache();
        cache.createDiskStoreFactory().create("existentDiskStore");
        AttributesFactory af = new AttributesFactory();
        af.setDataPolicy(DataPolicy.PARTITION);
        af.setDiskStoreName("existentDiskStore");
        RegionAttributes ra = af.create();
        pr = (PartitionedRegion) cache.createRegion(regionname, ra);
        fail("testpartionedRegionCreate() Expected IllegalStateException not thrown");
    } catch (IllegalStateException expected) {
        assertEquals("Only regions with persistence or overflow to disk can specify DiskStore", expected.getMessage());
    }
    // Assert that setting any scope throws IllegalStateException
    final Scope[] scopes = { Scope.LOCAL, Scope.DISTRIBUTED_ACK, Scope.DISTRIBUTED_NO_ACK, Scope.GLOBAL };
    for (int i = 0; i < scopes.length; i++) {
        try {
            AttributesFactory af = new AttributesFactory();
            af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
            af.setScope(scopes[i]);
            RegionAttributes ra = af.create();
            Cache cache = PartitionedRegionTestHelper.createCache();
            pr = (PartitionedRegion) cache.createRegion(regionname, ra);
            fail("testpartionedRegionCreate() Expected IllegalStateException not thrown for Scope " + scopes[i]);
        } catch (IllegalStateException expected) {
        }
    }
    // test for redundancy > 3
    try {
        pr = (PartitionedRegion) PartitionedRegionTestHelper.createPartitionedRegion(regionname, String.valueOf(0), 4);
        fail("testpartionedRegionCreate() Expected IllegalStateException not thrown for redundancy > 3 ");
    } catch (IllegalStateException illex) {
        if (logger.fineEnabled()) {
            logger.fine("testpartionedRegionCreate() Got a correct exception-IllegalStateException for  redundancy > 3 ");
        }
    }
    // test for redundancy < 0
    try {
        pr = (PartitionedRegion) PartitionedRegionTestHelper.createPartitionedRegion(regionname, String.valueOf(200), -1);
        fail("testpartionedRegionCreate() Expected IllegalStateException not thrown for redundancy < 0 ");
    } catch (IllegalStateException illex) {
        if (logger.fineEnabled()) {
            logger.fine("testpartionedRegionCreate() Got a correct exception-IllegalStateException for  redundancy < 0 ");
        }
    }
}
Also used : AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) Scope(org.apache.geode.cache.Scope) RegionAttributes(org.apache.geode.cache.RegionAttributes) Cache(org.apache.geode.cache.Cache) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
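
The negative cases above all hinge on how RegionAttributes combines persistence, local-max-memory, and a DiskStore name. Below is a minimal sketch of a configuration that satisfies those rules (disk store created first, persistent data policy, non-zero local-max-memory); the names and values are placeholders, not from the test.

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DataPolicy;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.RegionAttributes;

public class PersistentPartitionSketch {

    public static void main(String[] args) {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        // The disk store must exist before any region refers to it by name.
        cache.createDiskStoreFactory().create("exampleDiskStore");
        AttributesFactory factory = new AttributesFactory();
        // A DiskStore name is only legal together with persistence or overflow.
        factory.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
        factory.setDiskStoreName("exampleDiskStore");
        // local-max-memory must be greater than zero: an accessor cannot be persistent.
        factory.setPartitionAttributes(new PartitionAttributesFactory()
                .setLocalMaxMemory(50)
                .create());
        RegionAttributes attrs = factory.create();
        cache.createRegion("examplePersistentRegion", attrs);
        cache.close();
    }
}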

Example 50 with RegionAttributes

Use of org.apache.geode.cache.RegionAttributes in project geode by apache.

The class PartitionedRegionEvictionDUnitTest method testEntryLRUWithLocalDestroy.

@Test
public void testEntryLRUWithLocalDestroy() {
    final Host host = Host.getHost(0);
    final VM vm2 = host.getVM(2);
    final VM vm3 = host.getVM(3);
    final String uniqName = getUniqueName();
    final int redundantCopies = 1;
    final int maxBuckets = 8;
    final int maxEntries = 16;
    final String name = uniqName + "-PR";
    final int extraEntries = 4;
    // final int heapPercentage = 66;
    // final int evictorInterval = 100;
    final SerializableRunnable create = new CacheSerializableRunnable("Create Entry LRU with local destroy on a partitioned Region") {

        public void run2() {
            final AttributesFactory factory = new AttributesFactory();
            factory.setOffHeap(isOffHeap());
            factory.setPartitionAttributes(new PartitionAttributesFactory().setRedundantCopies(redundantCopies).setTotalNumBuckets(maxBuckets).create());
            factory.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maxEntries, EvictionAction.LOCAL_DESTROY));
            factory.addCacheListener(new VerifiableCacheListener() {

                private long evictionDestroyEvents = 0;

                public void afterDestroy(EntryEvent e) {
                    System.out.println("EEEEEEEEEEEEEE key:" + e.getKey());
                    EntryEventImpl eei = (EntryEventImpl) e;
                    if (Operation.EVICT_DESTROY.equals(eei.getOperation())) {
                        evictionDestroyEvents++;
                    }
                }

                public boolean verify(long expectedEntries) {
                    return expectedEntries == evictionDestroyEvents;
                }
            });
            final PartitionedRegion pr = (PartitionedRegion) createRootRegion(name, factory.create());
            assertNotNull(pr);
        }
    };
    vm3.invoke(create);
    final SerializableRunnable create2 = new SerializableRunnable("Create Entry LRU with local destroy on a partitioned Region") {

        public void run() {
            try {
                final AttributesFactory factory = new AttributesFactory();
                factory.setOffHeap(isOffHeap());
                factory.setPartitionAttributes(new PartitionAttributesFactory().setRedundantCopies(redundantCopies).setTotalNumBuckets(8).create());
                factory.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maxEntries));
                final PartitionedRegion pr = (PartitionedRegion) createRootRegion(name, factory.create());
                assertNotNull(pr);
            } catch (final CacheException ex) {
                Assert.fail("While creating Partitioned region", ex);
            }
        }
    };
    vm2.invoke(create2);
    final SerializableRunnable createBuckets = new SerializableRunnable("Create Buckets") {

        public void run() {
            final PartitionedRegion pr = (PartitionedRegion) getRootRegion(name);
            assertNotNull(pr);
            for (int counter = 1; counter <= maxEntries + extraEntries; counter++) {
                pr.put(counter, new byte[1024 * 1024]);
            }
        }
    };
    vm3.invoke(createBuckets);
    final SerializableCallable assertBucketAttributesAndEviction = new SerializableCallable("Assert bucket attributes and eviction") {

        public Object call() throws Exception {
            final PartitionedRegion pr = (PartitionedRegion) getRootRegion(name);
            assertNotNull(pr);
            for (final Iterator i = pr.getDataStore().getAllLocalBuckets().iterator(); i.hasNext(); ) {
                final Map.Entry entry = (Map.Entry) i.next();
                final BucketRegion bucketRegion = (BucketRegion) entry.getValue();
                if (bucketRegion == null) {
                    continue;
                }
                assertTrue(bucketRegion.getAttributes().getEvictionAttributes().getAlgorithm().isLRUEntry());
                assertTrue(bucketRegion.getAttributes().getEvictionAttributes().getAction().isLocalDestroy());
            }
            final long entriesEvicted = ((AbstractLRURegionMap) pr.entries)._getLruList().stats().getEvictions();
            return entriesEvicted;
        }
    };
    final Long v2i = (Long) vm2.invoke(assertBucketAttributesAndEviction);
    final Long v3i = (Long) vm3.invoke(assertBucketAttributesAndEviction);
    final int totalEvicted = v2i.intValue() + v3i.intValue();
    assertEquals(2 * extraEntries, totalEvicted);
    final SerializableCallable assertListenerCount = new SerializableCallable("Assert that the number of listener invocations matches the expected total") {

        public Object call() throws Exception {
            final PartitionedRegion pr = (PartitionedRegion) getRootRegion(name);
            assertNotNull(pr);
            RegionAttributes attrs = pr.getAttributes();
            assertNotNull(attrs);
            long entriesEvicted = ((AbstractLRURegionMap) pr.entries)._getLruList().stats().getEvictions();
            VerifiableCacheListener verifyMe = null;
            for (CacheListener listener : attrs.getCacheListeners()) {
                if (listener instanceof VerifiableCacheListener) {
                    verifyMe = ((VerifiableCacheListener) listener);
                }
            }
            // assert if unable to find the expected listener
            assertNotNull(verifyMe);
            return verifyMe.verify(entriesEvicted);
        }
    };
    assertTrue((Boolean) vm3.invoke(assertListenerCount));
}
Also used : CacheException(org.apache.geode.cache.CacheException) RegionAttributes(org.apache.geode.cache.RegionAttributes) CacheListener(org.apache.geode.cache.CacheListener) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) EntryEvent(org.apache.geode.cache.EntryEvent) Iterator(java.util.Iterator) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) Host(org.apache.geode.test.dunit.Host) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) Map(java.util.Map) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
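
The per-bucket assertions in this test read the effective configuration back through RegionAttributes.getEvictionAttributes(). Below is a minimal standalone sketch of the same round trip on a single member, with placeholder names and a smaller entry limit, not taken from the test.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionAttributes;
import org.apache.geode.cache.RegionShortcut;

public class EvictionAttributesSketch {

    public static void main(String[] args) {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        Region<Integer, byte[]> region = cache
                .<Integer, byte[]>createRegionFactory(RegionShortcut.PARTITION)
                .setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(
                        16, EvictionAction.LOCAL_DESTROY))
                .create("exampleEvictionRegion");
        // RegionAttributes exposes the configuration that the DUnit test checks per bucket.
        RegionAttributes<Integer, byte[]> attrs = region.getAttributes();
        System.out.println("Eviction algorithm: " + attrs.getEvictionAttributes().getAlgorithm());
        System.out.println("Eviction action: " + attrs.getEvictionAttributes().getAction());
        cache.close();
    }
}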

Aggregations

RegionAttributes (org.apache.geode.cache.RegionAttributes)590 AttributesFactory (org.apache.geode.cache.AttributesFactory)471 Region (org.apache.geode.cache.Region)256 Test (org.junit.Test)251 Properties (java.util.Properties)158 PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion)128 ConfigurationProperties (org.apache.geode.distributed.ConfigurationProperties)126 PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory)118 LocalRegion (org.apache.geode.internal.cache.LocalRegion)112 Cache (org.apache.geode.cache.Cache)99 VM (org.apache.geode.test.dunit.VM)93 DistributedTest (org.apache.geode.test.junit.categories.DistributedTest)93 Host (org.apache.geode.test.dunit.Host)89 HashSet (java.util.HashSet)80 CacheException (org.apache.geode.cache.CacheException)65 FlakyTest (org.apache.geode.test.junit.categories.FlakyTest)62 CacheServer (org.apache.geode.cache.server.CacheServer)60 SerializableCallable (org.apache.geode.test.dunit.SerializableCallable)59 ArrayList (java.util.ArrayList)57 PartitionAttributesImpl (org.apache.geode.internal.cache.PartitionAttributesImpl)56