Search in sources:

Example 1 with EvictionAttributesImpl

use of org.apache.geode.internal.cache.EvictionAttributesImpl in project geode by apache.

From the class ConcurrentIndexInitOnOverflowRegionDUnitTest, method testAsyncIndexInitDuringEntryDestroyAndQueryOnRR.

/**
 * Verifies that creating an index while an entry destroy is in flight on a replicated
 * overflow region completes without deadlocking.
 */
@Test
public void testAsyncIndexInitDuringEntryDestroyAndQueryOnRR() {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    name = "PartionedPortfoliosPR";
    // Create Overflow Persistent Partition Region
    vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            Region partitionRegion = null;
            IndexManager.testHook = null;
            try {
                DiskStore ds = cache.findDiskStore("disk");
                if (ds == null) {
                    ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
                }
                AttributesFactory attr = new AttributesFactory();
                attr.setValueConstraint(PortfolioData.class);
                attr.setIndexMaintenanceSynchronous(true);
                EvictionAttributesImpl evicAttr = new EvictionAttributesImpl().setAction(EvictionAction.OVERFLOW_TO_DISK);
                evicAttr.setAlgorithm(EvictionAlgorithm.LRU_ENTRY).setMaximum(1);
                attr.setEvictionAttributes(evicAttr);
                attr.setDataPolicy(DataPolicy.REPLICATE);
                // attr.setPartitionAttributes(new
                // PartitionAttributesFactory().setTotalNumBuckets(1).create());
                attr.setDiskStoreName("disk");
                RegionFactory regionFactory = cache.createRegionFactory(attr.create());
                partitionRegion = regionFactory.create(name);
            } catch (IllegalStateException ex) {
                LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
            }
            assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
            assertNotNull("Region ref null", partitionRegion);
            assertTrue("Region ref claims to be destroyed", !partitionRegion.isDestroyed());
            // Create Indexes
            try {
                Index index = cache.getQueryService().createIndex("statusIndex", "p.status", "/" + name + " p");
                assertNotNull(index);
            } catch (Exception e1) {
                e1.printStackTrace();
                fail("Index creation failed");
            }
        }
    });
    // Start changing values in the region; without the fix this would deadlock.
    AsyncInvocation asyncInv1 = vm0.invokeAsync(new CacheSerializableRunnable("Change value in region") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            // Do a put in region.
            Region r = getCache().getRegion(name);
            for (int i = 0; i < 100; i++) {
                r.put(i, new PortfolioData(i));
            }
            assertNull(IndexManager.testHook);
            IndexManager.testHook = new IndexManagerTestHook();
            // Destroy one of the values.
            getCache().getLogger().fine("Destroying the value");
            r.destroy(1);
            IndexManager.testHook = null;
        }
    });
    AsyncInvocation asyncInv2 = vm0.invokeAsync(new CacheSerializableRunnable("Run query on region") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            while (!hooked) {
                Wait.pause(100);
            }
            // Create and hence initialize Index
            try {
                Index index = cache.getQueryService().createIndex("idIndex", "p.ID", "/" + name + " p");
                assertNotNull(index);
            } catch (Exception e1) {
                e1.printStackTrace();
                fail("Index creation failed");
            }
        }
    });
    // If either invocation takes more than 30 seconds, it's a deadlock.
    ThreadUtils.join(asyncInv2, 30 * 1000);
    ThreadUtils.join(asyncInv1, 30 * 1000);
}
Also used : CacheException(org.apache.geode.cache.CacheException) Host(org.apache.geode.test.dunit.Host) Index(org.apache.geode.cache.query.Index) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) IgnoredException(org.apache.geode.test.dunit.IgnoredException) DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) RegionFactory(org.apache.geode.cache.RegionFactory) EvictionAttributesImpl(org.apache.geode.internal.cache.EvictionAttributesImpl) VM(org.apache.geode.test.dunit.VM) Region(org.apache.geode.cache.Region) PortfolioData(org.apache.geode.cache.query.data.PortfolioData) Cache(org.apache.geode.cache.Cache) ClientCache(org.apache.geode.cache.client.ClientCache) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
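
The region setup above is the part that exercises EvictionAttributesImpl directly. As a quick reference, the same overflow-to-disk configuration can be distilled into a standalone sketch; this is a minimal sketch, assuming a member created with CacheFactory, and the class name, disk directory, and sample value below are illustrative rather than taken from the test.

import java.io.File;

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DataPolicy;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAlgorithm;
import org.apache.geode.cache.Region;
import org.apache.geode.internal.cache.EvictionAttributesImpl;

public class OverflowRegionSketch {

    public static void main(String[] args) {
        Cache cache = new CacheFactory().create();

        // Disk store that backs the overflow; the name "disk" and its directory are illustrative.
        new File("overflow-dir").mkdirs();
        cache.createDiskStoreFactory()
            .setDiskDirs(new File[] { new File("overflow-dir") })
            .create("disk");

        // LRU entry eviction that overflows to disk once more than one entry is in memory,
        // mirroring the EvictionAttributesImpl setup used in the test above.
        EvictionAttributesImpl evictionAttributes =
            new EvictionAttributesImpl().setAction(EvictionAction.OVERFLOW_TO_DISK);
        evictionAttributes.setAlgorithm(EvictionAlgorithm.LRU_ENTRY).setMaximum(1);

        AttributesFactory attributesFactory = new AttributesFactory();
        attributesFactory.setEvictionAttributes(evictionAttributes);
        attributesFactory.setDataPolicy(DataPolicy.REPLICATE);
        attributesFactory.setDiskStoreName("disk");

        Region region = cache.createRegionFactory(attributesFactory.create()).create("PartionedPortfoliosPR");
        region.put(1, "value-1");

        cache.close();
    }
}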

Example 2 with EvictionAttributesImpl

use of org.apache.geode.internal.cache.EvictionAttributesImpl in project geode by apache.

From the class ConcurrentIndexOperationsOnOverflowRegionDUnitTest, method testAsyncIndexInitDuringEntryDestroyAndQueryOnRR.

// GEODE-1828
@Category(FlakyTest.class)
@Test
public void testAsyncIndexInitDuringEntryDestroyAndQueryOnRR() {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    hooked = false;
    name = "PartionedPortfoliosPR";
    // Create Overflow Persistent Partition Region
    vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            Region partitionRegion = null;
            IndexManager.testHook = null;
            try {
                DiskStore ds = cache.findDiskStore("disk");
                if (ds == null) {
                    ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
                }
                AttributesFactory attr = new AttributesFactory();
                attr.setValueConstraint(PortfolioData.class);
                attr.setIndexMaintenanceSynchronous(true);
                EvictionAttributesImpl evicAttr = new EvictionAttributesImpl().setAction(EvictionAction.OVERFLOW_TO_DISK);
                evicAttr.setAlgorithm(EvictionAlgorithm.LRU_ENTRY).setMaximum(1);
                attr.setEvictionAttributes(evicAttr);
                attr.setDataPolicy(DataPolicy.REPLICATE);
                // attr.setPartitionAttributes(new
                // PartitionAttributesFactory().setTotalNumBuckets(1).create());
                attr.setDiskStoreName("disk");
                RegionFactory regionFactory = cache.createRegionFactory(attr.create());
                partitionRegion = regionFactory.create(name);
            } catch (IllegalStateException ex) {
                LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
            }
            assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
            assertNotNull("Region ref null", partitionRegion);
            assertTrue("Region ref claims to be destroyed", !partitionRegion.isDestroyed());
            // Create Indexes
            try {
                Index index = cache.getQueryService().createIndex("statusIndex", "p.ID", "/" + name + " p");
                assertNotNull(index);
            } catch (Exception e1) {
                e1.printStackTrace();
                fail("Index creation failed");
            }
        }
    });
    // Start changing values in the region; without the fix this would deadlock.
    AsyncInvocation asyncInv1 = vm0.invokeAsync(new CacheSerializableRunnable("Change value in region") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            // Do a put in region.
            Region r = getCache().getRegion(name);
            for (int i = 0; i < 100; i++) {
                r.put(i, new PortfolioData(i));
            }
            assertNull(IndexManager.testHook);
            IndexManager.testHook = new IndexManagerTestHook();
            // Destroy one of the values.
            getCache().getLogger().fine("Destroying the value");
            r.destroy(1);
            IndexManager.testHook = null;
        }
    });
    AsyncInvocation asyncInv2 = vm0.invokeAsync(new CacheSerializableRunnable("Run query on region") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            Query statusQuery = getCache().getQueryService().newQuery("select * from /" + name + " p where p.ID > -1");
            while (!hooked) {
                Wait.pause(100);
            }
            try {
                getCache().getLogger().fine("Querying the region");
                SelectResults results = (SelectResults) statusQuery.execute();
                assertEquals(100, results.size());
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    });
    // If either invocation takes more than 30 seconds, it's a deadlock.
    ThreadUtils.join(asyncInv2, 30 * 1000);
    ThreadUtils.join(asyncInv1, 30 * 1000);
}
Also used : Query(org.apache.geode.cache.query.Query) CacheException(org.apache.geode.cache.CacheException) Host(org.apache.geode.test.dunit.Host) Index(org.apache.geode.cache.query.Index) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) CacheException(org.apache.geode.cache.CacheException) DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) SelectResults(org.apache.geode.cache.query.SelectResults) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) RegionFactory(org.apache.geode.cache.RegionFactory) EvictionAttributesImpl(org.apache.geode.internal.cache.EvictionAttributesImpl) VM(org.apache.geode.test.dunit.VM) Region(org.apache.geode.cache.Region) PortfolioData(org.apache.geode.cache.query.data.PortfolioData) Cache(org.apache.geode.cache.Cache) Category(org.junit.experimental.categories.Category) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
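
The querying half of this test uses the standard QueryService API rather than anything specific to EvictionAttributesImpl. A minimal sketch of that part in isolation; the region name "/portfolios" and the wrapper class and method names are illustrative.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.query.Query;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;

public class OverflowRegionQuerySketch {

    // Runs the same style of OQL query the test issues while the index test hook is held.
    static int countEntriesWithPositiveId(Cache cache) throws Exception {
        QueryService queryService = cache.getQueryService();
        Query query = queryService.newQuery("select * from /portfolios p where p.ID > -1");
        SelectResults results = (SelectResults) query.execute();
        return results.size();
    }
}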

Example 3 with EvictionAttributesImpl

use of org.apache.geode.internal.cache.EvictionAttributesImpl in project geode by apache.

From the class CacheXmlParser, method endLRUHeapPercentage.

/**
 * Complete the configuration of a <code>lru-heap-percentage</code> eviction controller. Check for
 * the declaration of an {@link ObjectSizer}. Assign the attributes to the enclosing
 * <code>region-attributes</code>.
 */
private void endLRUHeapPercentage() {
    Object declCheck = stack.peek();
    Declarable d = null;
    if (declCheck instanceof String || declCheck instanceof Parameter) {
        d = createDeclarable();
        if (!(d instanceof ObjectSizer)) {
            String s = "A " + d.getClass().getName() + " is not an instance of an ObjectSizer";
            throw new CacheXmlException(s);
        }
    }
    EvictionAttributesImpl eai = (EvictionAttributesImpl) stack.pop();
    if (d != null) {
        eai.setObjectSizer((ObjectSizer) d);
    }
    RegionAttributesCreation regAttrs = peekRegionAttributesContext(LRU_HEAP_PERCENTAGE);
    regAttrs.setEvictionAttributes(eai);
}
Also used : Declarable(org.apache.geode.cache.Declarable) CacheXmlException(org.apache.geode.cache.CacheXmlException) EvictionAttributesImpl(org.apache.geode.internal.cache.EvictionAttributesImpl) ObjectSizer(org.apache.geode.cache.util.ObjectSizer)
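
This parser method is the declarative (cache.xml) path onto EvictionAttributesImpl for heap-LRU eviction. For comparison, the programmatic route goes through the public EvictionAttributes factory; the sketch below assumes the createLRUHeapAttributes(ObjectSizer, EvictionAction) overload, and the trivial sizer and class name are illustrative.

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.RegionAttributes;
import org.apache.geode.cache.util.ObjectSizer;

public class HeapLruAttributesSketch {

    // Rough programmatic equivalent of an <lru-heap-percentage> element with a nested
    // ObjectSizer declarable: build the eviction attributes and hand them to the region attributes.
    static RegionAttributes buildHeapLruAttributes() {
        ObjectSizer sizer = new ObjectSizer() {
            @Override
            public int sizeof(Object o) {
                return 1; // illustrative; a real sizer estimates the entry's size in bytes
            }
        };
        AttributesFactory factory = new AttributesFactory();
        factory.setEvictionAttributes(
            EvictionAttributes.createLRUHeapAttributes(sizer, EvictionAction.OVERFLOW_TO_DISK));
        return factory.create();
    }
}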

Example 4 with EvictionAttributesImpl

use of org.apache.geode.internal.cache.EvictionAttributesImpl in project geode by apache.

From the class ParallelQueueRemovalMessageJUnitTest, method createQueueRegion.

private void createQueueRegion() {
    // Mock queue region
    this.queueRegion = mock(PartitionedRegion.class);
    when(this.queueRegion.getFullPath()).thenReturn(getRegionQueueName());
    when(this.queueRegion.getPrStats()).thenReturn(mock(PartitionedRegionStats.class));
    when(this.queueRegion.getDataStore()).thenReturn(mock(PartitionedRegionDataStore.class));
    when(this.queueRegion.getCache()).thenReturn(this.cache);
    EvictionAttributesImpl ea = (EvictionAttributesImpl) EvictionAttributes.createLRUMemoryAttributes(100, null, EvictionAction.OVERFLOW_TO_DISK);
    LRUAlgorithm algorithm = ea.createEvictionController(this.queueRegion, false);
    algorithm.getLRUHelper().initStats(this.queueRegion, this.cache.getDistributedSystem());
    when(this.queueRegion.getEvictionController()).thenReturn(algorithm);
}
Also used : LRUAlgorithm(org.apache.geode.internal.cache.lru.LRUAlgorithm) EvictionAttributesImpl(org.apache.geode.internal.cache.EvictionAttributesImpl) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) PartitionedRegionDataStore(org.apache.geode.internal.cache.PartitionedRegionDataStore) PartitionedRegionStats(org.apache.geode.internal.cache.PartitionedRegionStats)
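
The cast in this mock setup relies on the public EvictionAttributes factory handing back an EvictionAttributesImpl, and the three arguments to createLRUMemoryAttributes are the memory limit in megabytes, an optional ObjectSizer (null here), and the eviction action. A minimal sketch of just that call, isolated from the Mockito wiring; the wrapper class and method names are illustrative.

import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.internal.cache.EvictionAttributesImpl;

public class MemoryLruAttributesSketch {

    // Builds memory-LRU eviction attributes capped at 100 MB that overflow to disk,
    // then downcasts to the internal implementation type, as the mock-based test does.
    static EvictionAttributesImpl memoryLruOverflow() {
        return (EvictionAttributesImpl) EvictionAttributes.createLRUMemoryAttributes(
            100, null, EvictionAction.OVERFLOW_TO_DISK);
    }
}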

Example 5 with EvictionAttributesImpl

use of org.apache.geode.internal.cache.EvictionAttributesImpl in project geode by apache.

From the class ConcurrentIndexInitOnOverflowRegionDUnitTest, method testAsyncIndexInitDuringEntryPutUsingClientOnRR.

/**
 * Verifies that index creation on the server does not deadlock with a client-driven entry
 * destroy on a replicated overflow region served through a cache server.
 */
@Test
public void testAsyncIndexInitDuringEntryPutUsingClientOnRR() {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    IgnoredException.addIgnoredException("Unexpected IOException:");
    IgnoredException.addIgnoredException("java.net.SocketException");
    name = "PartionedPortfoliosPR";
    // Create Overflow Persistent Partition Region
    vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            Region partitionRegion = null;
            IndexManager.testHook = null;
            try {
                CacheServer bridge = cache.addCacheServer();
                bridge.setPort(0);
                bridge.start();
                bridgeServerPort = bridge.getPort();
                DiskStore ds = cache.findDiskStore("disk");
                if (ds == null) {
                    ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
                }
                AttributesFactory attr = new AttributesFactory();
                attr.setValueConstraint(PortfolioData.class);
                attr.setIndexMaintenanceSynchronous(true);
                EvictionAttributesImpl evicAttr = new EvictionAttributesImpl().setAction(EvictionAction.OVERFLOW_TO_DISK);
                evicAttr.setAlgorithm(EvictionAlgorithm.LRU_ENTRY).setMaximum(1);
                attr.setEvictionAttributes(evicAttr);
                attr.setDataPolicy(DataPolicy.REPLICATE);
                // attr.setPartitionAttributes(new
                // PartitionAttributesFactory().setTotalNumBuckets(1).create());
                attr.setDiskStoreName("disk");
                RegionFactory regionFactory = cache.createRegionFactory(attr.create());
                partitionRegion = regionFactory.create(name);
            } catch (IllegalStateException ex) {
                LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
            } catch (IOException e) {
                e.printStackTrace();
            }
            assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
            assertNotNull("Region ref null", partitionRegion);
            assertTrue("Region ref claims to be destroyed", !partitionRegion.isDestroyed());
            // Create Indexes
            try {
                Index index = cache.getQueryService().createIndex("idIndex", "p.ID", "/" + name + " p");
                assertNotNull(index);
            } catch (Exception e1) {
                e1.printStackTrace();
                fail("Index creation failed");
            }
        }
    });
    final int port = vm0.invoke(() -> ConcurrentIndexInitOnOverflowRegionDUnitTest.getCacheServerPort());
    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
    // Start changing values in the region; without the fix this would deadlock.
    vm1.invoke(new CacheSerializableRunnable("Change value in region") {

        @Override
        public void run2() throws CacheException {
            disconnectFromDS();
            ClientCache clientCache = new ClientCacheFactory().addPoolServer(host0, port).create();
            // Do a put in region.
            Region r = clientCache.createClientRegionFactory(ClientRegionShortcut.PROXY).create(name);
            for (int i = 0; i < 100; i++) {
                r.put(i, new PortfolioData(i));
            }
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Set Test Hook") {

        @Override
        public void run2() throws CacheException {
            // Set test hook before client operation
            assertNull(IndexManager.testHook);
            IndexManager.testHook = new IndexManagerTestHook();
        }
    });
    AsyncInvocation asyncInv1 = vm1.invokeAsync(new CacheSerializableRunnable("Change value in region") {

        @Override
        public void run2() throws CacheException {
            ClientCache clientCache = ClientCacheFactory.getAnyInstance();
            // Do a put in region.
            Region r = clientCache.getRegion(name);
            // Destroy one of the values.
            clientCache.getLogger().fine("Destroying the value");
            r.destroy(1);
        }
    });
    AsyncInvocation asyncInv2 = vm0.invokeAsync(new CacheSerializableRunnable("Run query on region") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            while (!hooked) {
                Wait.pause(100);
            }
            // Create Indexes
            try {
                Index index = cache.getQueryService().createIndex("statusIndex", "p.status", "/" + name + " p");
                assertNotNull(index);
            } catch (Exception e1) {
                e1.printStackTrace();
                fail("Index creation failed");
            }
        }
    });
    // If either invocation takes more than 30 seconds, it's a deadlock.
    ThreadUtils.join(asyncInv2, 30 * 1000);
    ThreadUtils.join(asyncInv1, 30 * 1000);
    vm0.invoke(new CacheSerializableRunnable("Clear Test Hook") {

        @Override
        public void run2() throws CacheException {
            assertNotNull(IndexManager.testHook);
            IndexManager.testHook = null;
        }
    });
}
Also used : CacheException(org.apache.geode.cache.CacheException) Host(org.apache.geode.test.dunit.Host) Index(org.apache.geode.cache.query.Index) IOException(java.io.IOException) ClientCache(org.apache.geode.cache.client.ClientCache) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) IgnoredException(org.apache.geode.test.dunit.IgnoredException) ClientCacheFactory(org.apache.geode.cache.client.ClientCacheFactory) DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) RegionFactory(org.apache.geode.cache.RegionFactory) EvictionAttributesImpl(org.apache.geode.internal.cache.EvictionAttributesImpl) VM(org.apache.geode.test.dunit.VM) Region(org.apache.geode.cache.Region) CacheServer(org.apache.geode.cache.server.CacheServer) PortfolioData(org.apache.geode.cache.query.data.PortfolioData) Cache(org.apache.geode.cache.Cache) ClientCache(org.apache.geode.cache.client.ClientCache) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
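
The client half of this test is plain client/server setup and is independent of the eviction configuration. A minimal sketch of that part, assuming a cache server is already listening on the given host and port; the host, port, sample value, and class name are illustrative.

import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;

public class ClientProxyRegionSketch {

    public static void main(String[] args) {
        // Connect to an existing cache server and create a PROXY region, so every
        // operation is forwarded to the server-side overflow region.
        ClientCache clientCache = new ClientCacheFactory()
            .addPoolServer("localhost", 40404)
            .create();
        ClientRegionFactory<Object, Object> regionFactory =
            clientCache.createClientRegionFactory(ClientRegionShortcut.PROXY);
        Region<Object, Object> region = regionFactory.create("PartionedPortfoliosPR");

        region.put(1, "value-1");
        region.destroy(1);

        clientCache.close();
    }
}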

Aggregations

EvictionAttributesImpl (org.apache.geode.internal.cache.EvictionAttributesImpl): 12 uses
Test (org.junit.Test): 8 uses
AttributesFactory (org.apache.geode.cache.AttributesFactory): 6 uses
Cache (org.apache.geode.cache.Cache): 6 uses
CacheException (org.apache.geode.cache.CacheException): 6 uses
DiskStore (org.apache.geode.cache.DiskStore): 6 uses
Region (org.apache.geode.cache.Region): 6 uses
RegionFactory (org.apache.geode.cache.RegionFactory): 6 uses
Index (org.apache.geode.cache.query.Index): 6 uses
PortfolioData (org.apache.geode.cache.query.data.PortfolioData): 6 uses
CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable): 6 uses
AsyncInvocation (org.apache.geode.test.dunit.AsyncInvocation): 6 uses
Host (org.apache.geode.test.dunit.Host): 6 uses
VM (org.apache.geode.test.dunit.VM): 6 uses
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 6 uses
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory): 4 uses
Query (org.apache.geode.cache.query.Query): 4 uses
SelectResults (org.apache.geode.cache.query.SelectResults): 4 uses
FlakyTest (org.apache.geode.test.junit.categories.FlakyTest): 4 uses
IOException (java.io.IOException): 2 uses