
Example 6 with EvictionAttributesImpl

use of org.apache.geode.internal.cache.EvictionAttributesImpl in project geode by apache.

the class ConcurrentIndexOperationsOnOverflowRegionDUnitTest method testAsyncIndexInitDuringEntryDestroyAndQueryOnPersistentRR.

/**
  *
  */
@Test
public void testAsyncIndexInitDuringEntryDestroyAndQueryOnPersistentRR() {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    hooked = false;
    name = "PartionedPortfoliosPR";
    // Create Overflow Persistent Partition Region
    vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            Region partitionRegion = null;
            IndexManager.testHook = null;
            try {
                DiskStore ds = cache.findDiskStore("disk");
                if (ds == null) {
                    ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
                }
                AttributesFactory attr = new AttributesFactory();
                attr.setValueConstraint(PortfolioData.class);
                attr.setIndexMaintenanceSynchronous(true);
                EvictionAttributesImpl evicAttr = new EvictionAttributesImpl().setAction(EvictionAction.OVERFLOW_TO_DISK);
                evicAttr.setAlgorithm(EvictionAlgorithm.LRU_ENTRY).setMaximum(1);
                attr.setEvictionAttributes(evicAttr);
                attr.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
                // attr.setPartitionAttributes(new
                // PartitionAttributesFactory().setTotalNumBuckets(1).create());
                attr.setDiskStoreName("disk");
                RegionFactory regionFactory = cache.createRegionFactory(attr.create());
                partitionRegion = regionFactory.create(name);
            } catch (IllegalStateException ex) {
                LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
            }
            assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
            assertNotNull("Region ref null", partitionRegion);
            assertTrue("Region ref claims to be destroyed", !partitionRegion.isDestroyed());
            // Create Indexes
            try {
                Index index = cache.getQueryService().createIndex("statusIndex", "p.ID", "/" + name + " p");
                assertNotNull(index);
            } catch (Exception e1) {
                e1.printStackTrace();
                fail("Index creation failed");
            }
        }
    });
    // Start changing the value in Region which should turn into a deadlock if
    // the fix is not there
    AsyncInvocation asyncInv1 = vm0.invokeAsync(new CacheSerializableRunnable("Change value in region") {

        @Override
        public void run2() throws CacheException {
            // Do a put in region.
            Region r = getCache().getRegion(name);
            for (int i = 0; i < 100; i++) {
                r.put(i, new PortfolioData(i));
            }
            assertNull(IndexManager.testHook);
            IndexManager.testHook = new IndexManagerTestHook();
            // Destroy one of the values.
            getCache().getLogger().fine("Destroying the value");
            r.destroy(1);
            IndexManager.testHook = null;
        }
    });
    AsyncInvocation asyncInv2 = vm0.invokeAsync(new CacheSerializableRunnable("Run query on region") {

        @Override
        public void run2() throws CacheException {
            Query statusQuery = getCache().getQueryService().newQuery("select * from /" + name + " p where p.ID > -1");
            while (!hooked) {
                Wait.pause(100);
            }
            try {
                getCache().getLogger().fine("Querying the region");
                SelectResults results = (SelectResults) statusQuery.execute();
                assertEquals(100, results.size());
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    });
    // If we take more than 30 seconds then it's a deadlock.
    ThreadUtils.join(asyncInv2, 30 * 1000);
    ThreadUtils.join(asyncInv1, 30 * 1000);
}
Also used: Query (org.apache.geode.cache.query.Query), CacheException (org.apache.geode.cache.CacheException), Host (org.apache.geode.test.dunit.Host), Index (org.apache.geode.cache.query.Index), AsyncInvocation (org.apache.geode.test.dunit.AsyncInvocation), DiskStore (org.apache.geode.cache.DiskStore), AttributesFactory (org.apache.geode.cache.AttributesFactory), PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory), SelectResults (org.apache.geode.cache.query.SelectResults), CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable), RegionFactory (org.apache.geode.cache.RegionFactory), EvictionAttributesImpl (org.apache.geode.internal.cache.EvictionAttributesImpl), VM (org.apache.geode.test.dunit.VM), Region (org.apache.geode.cache.Region), PortfolioData (org.apache.geode.cache.query.data.PortfolioData), Cache (org.apache.geode.cache.Cache), FlakyTest (org.apache.geode.test.junit.categories.FlakyTest), Test (org.junit.Test), DistributedTest (org.apache.geode.test.junit.categories.DistributedTest)
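
Note that the test above assembles its eviction settings through the internal EvictionAttributesImpl builder. Application code would normally reach the same LRU-entry, overflow-to-disk configuration through the public EvictionAttributes factory methods. A minimal sketch under that assumption (the cache bootstrap, disk directory, and the "PortfoliosOverflow" region name are illustrative and not taken from the test):

import java.io.File;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class OverflowRegionSketch {

    public static void main(String[] args) {
        Cache cache = new CacheFactory().create();
        // Disk store backing the overflowed entries; "disk" mirrors the name used in the test.
        cache.createDiskStoreFactory()
            .setDiskDirs(new File[] { new File(".") })
            .create("disk");
        // Public-API equivalent of the internal chain used above:
        // new EvictionAttributesImpl().setAction(OVERFLOW_TO_DISK).setAlgorithm(LRU_ENTRY).setMaximum(1)
        Region<Integer, String> region = cache
            .<Integer, String>createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT)
            .setEvictionAttributes(
                EvictionAttributes.createLRUEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK))
            .setDiskStoreName("disk")
            .create("PortfoliosOverflow");
        region.put(1, "first entry");
        cache.close();
    }
}

createLRUEntryAttributes(1, OVERFLOW_TO_DISK) corresponds to the LRU_ENTRY algorithm with a maximum of one in-memory entry that the test sets on EvictionAttributesImpl.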

Example 7 with EvictionAttributesImpl

use of org.apache.geode.internal.cache.EvictionAttributesImpl in project geode by apache.

the class ConcurrentIndexOperationsOnOverflowRegionDUnitTest method testAsyncIndexInitDuringEntryDestroyAndQueryOnPersistentPR.

/**
  *
  */
@Test
public void testAsyncIndexInitDuringEntryDestroyAndQueryOnPersistentPR() {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    hooked = false;
    name = "PartionedPortfoliosPR";
    // Create Overflow Persistent Partition Region
    vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            Region partitionRegion = null;
            IndexManager.testHook = null;
            try {
                DiskStore ds = cache.findDiskStore("disk");
                if (ds == null) {
                    ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
                }
                AttributesFactory attr = new AttributesFactory();
                attr.setValueConstraint(PortfolioData.class);
                attr.setIndexMaintenanceSynchronous(true);
                EvictionAttributesImpl evicAttr = new EvictionAttributesImpl().setAction(EvictionAction.OVERFLOW_TO_DISK);
                evicAttr.setAlgorithm(EvictionAlgorithm.LRU_ENTRY).setMaximum(1);
                attr.setEvictionAttributes(evicAttr);
                attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
                attr.setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(1).create());
                attr.setDiskStoreName("disk");
                RegionFactory regionFactory = cache.createRegionFactory(attr.create());
                partitionRegion = regionFactory.create(name);
            } catch (IllegalStateException ex) {
                LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
            }
            assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
            assertNotNull("Region ref null", partitionRegion);
            assertTrue("Region ref claims to be destroyed", !partitionRegion.isDestroyed());
            // Create Indexes
            try {
                Index index = cache.getQueryService().createIndex("statusIndex", "p.ID", "/" + name + " p");
                assertNotNull(index);
            } catch (Exception e1) {
                e1.printStackTrace();
                fail("Index creation failed");
            }
        }
    });
    // Start changing the value in Region which should turn into a deadlock if
    // the fix is not there
    AsyncInvocation asyncInv1 = vm0.invokeAsync(new CacheSerializableRunnable("Change value in region") {

        @Override
        public void run2() throws CacheException {
            // Do a put in region.
            Region r = getCache().getRegion(name);
            for (int i = 0; i < 100; i++) {
                r.put(i, new PortfolioData(i));
            }
            assertNull(IndexManager.testHook);
            IndexManager.testHook = new IndexManagerTestHook();
            // Destroy one of the values.
            getCache().getLogger().fine("Destroying the value");
            r.destroy(1);
            IndexManager.testHook = null;
        }
    });
    AsyncInvocation asyncInv2 = vm0.invokeAsync(new CacheSerializableRunnable("Run query on region") {

        @Override
        public void run2() throws CacheException {
            Query statusQuery = getCache().getQueryService().newQuery("select * from /" + name + " p where p.ID > -1");
            while (!hooked) {
                Wait.pause(100);
            }
            try {
                getCache().getLogger().fine("Querying the region");
                SelectResults results = (SelectResults) statusQuery.execute();
                assertEquals(100, results.size());
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    });
    // If we take more than 30 seconds then it's a deadlock.
    ThreadUtils.join(asyncInv2, 30 * 1000);
    ThreadUtils.join(asyncInv1, 30 * 1000);
}
Also used: Query (org.apache.geode.cache.query.Query), CacheException (org.apache.geode.cache.CacheException), Host (org.apache.geode.test.dunit.Host), Index (org.apache.geode.cache.query.Index), AsyncInvocation (org.apache.geode.test.dunit.AsyncInvocation), DiskStore (org.apache.geode.cache.DiskStore), PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory), AttributesFactory (org.apache.geode.cache.AttributesFactory), SelectResults (org.apache.geode.cache.query.SelectResults), CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable), RegionFactory (org.apache.geode.cache.RegionFactory), EvictionAttributesImpl (org.apache.geode.internal.cache.EvictionAttributesImpl), VM (org.apache.geode.test.dunit.VM), Region (org.apache.geode.cache.Region), PortfolioData (org.apache.geode.cache.query.data.PortfolioData), Cache (org.apache.geode.cache.Cache), FlakyTest (org.apache.geode.test.junit.categories.FlakyTest), Test (org.junit.Test), DistributedTest (org.apache.geode.test.junit.categories.DistributedTest)

Example 8 with EvictionAttributesImpl

use of org.apache.geode.internal.cache.EvictionAttributesImpl in project geode by apache.

the class ConcurrentIndexOperationsOnOverflowRegionDUnitTest method testAsyncIndexInitDuringEntryDestroyAndQueryOnPR.

/**
   *
   */
@Test
public void testAsyncIndexInitDuringEntryDestroyAndQueryOnPR() {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    hooked = false;
    name = "PartionedPortfoliosPR";
    // Create Overflow Persistent Partition Region
    vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            Region partitionRegion = null;
            IndexManager.testHook = null;
            try {
                DiskStore ds = cache.findDiskStore("disk");
                if (ds == null) {
                    ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
                }
                AttributesFactory attr = new AttributesFactory();
                attr.setValueConstraint(PortfolioData.class);
                attr.setIndexMaintenanceSynchronous(true);
                EvictionAttributesImpl evicAttr = new EvictionAttributesImpl().setAction(EvictionAction.OVERFLOW_TO_DISK);
                evicAttr.setAlgorithm(EvictionAlgorithm.LRU_ENTRY).setMaximum(1);
                attr.setEvictionAttributes(evicAttr);
                attr.setDataPolicy(DataPolicy.PARTITION);
                attr.setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(1).create());
                attr.setDiskStoreName("disk");
                RegionFactory regionFactory = cache.createRegionFactory(attr.create());
                partitionRegion = regionFactory.create(name);
            } catch (IllegalStateException ex) {
                LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
            }
            assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
            assertNotNull("Region ref null", partitionRegion);
            assertTrue("Region ref claims to be destroyed", !partitionRegion.isDestroyed());
            // Create Indexes
            try {
                Index index = cache.getQueryService().createIndex("statusIndex", "p.ID", "/" + name + " p");
                assertNotNull(index);
            } catch (Exception e1) {
                e1.printStackTrace();
                fail("Index creation failed");
            }
        }
    });
    // Start changing the value in Region which should turn into a deadlock if the fix is not there
    AsyncInvocation asyncInv1 = vm0.invokeAsync(new CacheSerializableRunnable("Change value in region") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            // Do a put in region.
            Region r = getCache().getRegion(name);
            for (int i = 0; i < 100; i++) {
                r.put(i, new PortfolioData(i));
            }
            assertNull(IndexManager.testHook);
            IndexManager.testHook = new IndexManagerTestHook();
            // Destroy one of the values.
            getCache().getLogger().fine("Destroying the value");
            r.destroy(1);
            IndexManager.testHook = null;
        }
    });
    AsyncInvocation asyncInv2 = vm0.invokeAsync(new CacheSerializableRunnable("Run query on region") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            Query statusQuery = getCache().getQueryService().newQuery("select * from /" + name + " p where p.ID > -1");
            while (!hooked) {
                Wait.pause(100);
            }
            try {
                getCache().getLogger().fine("Querying the region");
                SelectResults results = (SelectResults) statusQuery.execute();
                assertEquals(100, results.size());
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    });
    // If we take more than 30 seconds then it's a deadlock.
    ThreadUtils.join(asyncInv2, 30 * 1000);
    ThreadUtils.join(asyncInv1, 30 * 1000);
}
Also used: Query (org.apache.geode.cache.query.Query), CacheException (org.apache.geode.cache.CacheException), Host (org.apache.geode.test.dunit.Host), Index (org.apache.geode.cache.query.Index), AsyncInvocation (org.apache.geode.test.dunit.AsyncInvocation), DiskStore (org.apache.geode.cache.DiskStore), PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory), AttributesFactory (org.apache.geode.cache.AttributesFactory), SelectResults (org.apache.geode.cache.query.SelectResults), CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable), RegionFactory (org.apache.geode.cache.RegionFactory), EvictionAttributesImpl (org.apache.geode.internal.cache.EvictionAttributesImpl), VM (org.apache.geode.test.dunit.VM), Region (org.apache.geode.cache.Region), PortfolioData (org.apache.geode.cache.query.data.PortfolioData), Cache (org.apache.geode.cache.Cache), FlakyTest (org.apache.geode.test.junit.categories.FlakyTest), Test (org.junit.Test), DistributedTest (org.apache.geode.test.junit.categories.DistributedTest)
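
Example 8 applies the same eviction chain to a non-persistent partitioned region with a single bucket. A hedged sketch of the equivalent setup through the public RegionFactory and PartitionAttributesFactory APIs (cache bootstrap and the region name are illustrative):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class PartitionedOverflowSketch {

    public static void main(String[] args) {
        Cache cache = new CacheFactory().create();
        // Single-bucket partitioned region with LRU-entry overflow, mirroring the attributes the
        // test builds via AttributesFactory + EvictionAttributesImpl. With no explicit disk store
        // name, overflowed values go to the cache's default disk store.
        Region<Integer, String> region = cache
            .<Integer, String>createRegionFactory(RegionShortcut.PARTITION)
            .setPartitionAttributes(
                new PartitionAttributesFactory<Integer, String>().setTotalNumBuckets(1).create())
            .setEvictionAttributes(
                EvictionAttributes.createLRUEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK))
            .create("PartitionedPortfolios");
        for (int i = 0; i < 10; i++) {
            region.put(i, "value-" + i); // all but one entry's value is evicted to disk
        }
        cache.close();
    }
}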

Example 9 with EvictionAttributesImpl

use of org.apache.geode.internal.cache.EvictionAttributesImpl in project geode by apache.

the class DynamicRegionFactory method createDynamicRegionImpl.

private Region createDynamicRegionImpl(String parentRegionName, String newRegionName, boolean addEntry) throws CacheException {
    Region parentRegion = this.cache.getRegion(parentRegionName);
    if (parentRegion == null) {
        String errMsg = LocalizedStrings.DynamicRegionFactory_ERROR__COULD_NOT_FIND_A_REGION_NAMED___0_.toLocalizedString(parentRegionName);
        RegionDestroyedException e = new RegionDestroyedException(errMsg, parentRegionName);
        this.cache.getLoggerI18n().warning(LocalizedStrings.DynamicRegionFactory_ERROR__COULD_NOT_FIND_A_REGION_NAMED___0_, parentRegionName, e);
        throw e;
    }
    // Create RegionAttributes by inheriting from the parent
    RegionAttributes rra = parentRegion.getAttributes();
    AttributesFactory af = new AttributesFactory(rra);
    EvictionAttributes ev = rra.getEvictionAttributes();
    if (ev != null && ev.getAlgorithm().isLRU()) {
        EvictionAttributes rev = new EvictionAttributesImpl((EvictionAttributesImpl) ev);
        af.setEvictionAttributes(rev);
    }
    // Test-only special case: regions whose name ends in _PRTEST_ are created as partitioned regions
    if (newRegionName.endsWith("_PRTEST_")) {
        af.setPartitionAttributes(new PartitionAttributesFactory().create());
    }
    RegionAttributes newRegionAttributes = af.create();
    Region newRegion;
    try {
        newRegion = parentRegion.createSubregion(newRegionName, newRegionAttributes);
        this.cache.getLoggerI18n().fine("Created dynamic region " + newRegion);
    } catch (RegionExistsException ex) {
        // a race condition can cause this, so just log it at fine level
        this.cache.getLoggerI18n().fine("DynamicRegion " + newRegionName + " in parent " + parentRegionName + " already existed");
        newRegion = ex.getRegion();
    }
    if (addEntry) {
        DynamicRegionAttributes dra = new DynamicRegionAttributes();
        dra.name = newRegionName;
        dra.rootRegionName = parentRegion.getFullPath();
        if (this.cache.getLoggerI18n().fineEnabled()) {
            this.cache.getLoggerI18n().fine("Putting entry into dynamic region list at key: " + newRegion.getFullPath());
        }
        this.dynamicRegionList.put(newRegion.getFullPath(), dra);
    }
    if (this.config.getRegisterInterest()) {
        ServerRegionProxy proxy = ((LocalRegion) newRegion).getServerProxy();
        if (proxy != null) {
            if (((Pool) proxy.getPool()).getSubscriptionEnabled()) {
                try {
                    newRegion.registerInterest("ALL_KEYS");
                } catch (GemFireSecurityException ex) {
                    // Ignore security exceptions here
                    this.cache.getSecurityLoggerI18n().warning(LocalizedStrings.DynamicRegionFactory_EXCEPTION_WHEN_REGISTERING_INTEREST_FOR_ALL_KEYS_IN_DYNAMIC_REGION_0_1, new Object[] { newRegion.getFullPath(), ex });
                }
            }
        }
    }
    if (regionCreateSleepMillis > 0) {
        try {
            Thread.sleep(regionCreateSleepMillis);
        } catch (InterruptedException ignore) {
            Thread.currentThread().interrupt();
        }
    }
    if (this.cache.getLoggerI18n().fineEnabled()) {
        this.cache.getLoggerI18n().fine("Created Dynamic Region " + newRegion.getFullPath());
    }
    return newRegion;
}
Also used: DynamicRegionAttributes (org.apache.geode.internal.cache.DynamicRegionAttributes), LocalRegion (org.apache.geode.internal.cache.LocalRegion), GemFireSecurityException (org.apache.geode.security.GemFireSecurityException), EvictionAttributesImpl (org.apache.geode.internal.cache.EvictionAttributesImpl), ServerRegionProxy (org.apache.geode.cache.client.internal.ServerRegionProxy), DistributedRegion (org.apache.geode.internal.cache.DistributedRegion), Pool (org.apache.geode.cache.client.Pool)
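
createDynamicRegionImpl is the internal path that copies the parent region's attributes, including any LRU EvictionAttributesImpl, onto the new subregion. From application code the usual entry point is DynamicRegionFactory.get(). A minimal sketch, assuming dynamic-region support is opened before the cache is created and using illustrative region names:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DynamicRegionFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class DynamicRegionSketch {

    public static void main(String[] args) {
        // Dynamic-region support has to be opened before the cache is created.
        DynamicRegionFactory.get().open();
        Cache cache = new CacheFactory().create();
        // Parent whose attributes (including any LRU eviction) the dynamic child inherits,
        // as createDynamicRegionImpl shows above.
        cache.createRegionFactory(RegionShortcut.REPLICATE).create("parent");
        // The public call routes into the internal createDynamicRegionImpl path shown above.
        Region<?, ?> child = DynamicRegionFactory.get().createDynamicRegion("/parent", "child");
        cache.getLogger().info("Created " + child.getFullPath());
        cache.close();
    }
}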

Example 10 with EvictionAttributesImpl

use of org.apache.geode.internal.cache.EvictionAttributesImpl in project geode by apache.

the class CacheXmlParser method endLRUMemorySize.

/**
   * Complete the configuration of a <code>lru-memory-size</code> eviction controller. Check for the
   * declaration of an {@link ObjectSizer}. Assign the attributes to the enclosing
   * <code>region-attributes</code>.
   */
private void endLRUMemorySize() {
    Object declCheck = stack.peek();
    Declarable d = null;
    if (declCheck instanceof String || declCheck instanceof Parameter) {
        d = createDeclarable();
        if (!(d instanceof ObjectSizer)) {
            throw new CacheXmlException(LocalizedStrings.CacheXmlParser_A_0_IS_NOT_AN_INSTANCE_OF_A_OBJECTSIZER.toLocalizedString(d.getClass().getName()));
        }
    }
    EvictionAttributesImpl eai = (EvictionAttributesImpl) stack.pop();
    if (d != null) {
        eai.setObjectSizer((ObjectSizer) d);
    }
    RegionAttributesCreation regAttrs = peekRegionAttributesContext(LRU_MEMORY_SIZE);
    regAttrs.setEvictionAttributes(eai);
}
Also used: Declarable (org.apache.geode.cache.Declarable), CacheXmlException (org.apache.geode.cache.CacheXmlException), EvictionAttributesImpl (org.apache.geode.internal.cache.EvictionAttributesImpl), ObjectSizer (org.apache.geode.cache.util.ObjectSizer)
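
endLRUMemorySize handles the cache.xml side of this configuration: it pops the partially built EvictionAttributesImpl for an lru-memory-size element and, if a Declarable was nested, wires it in as the ObjectSizer. A minimal sketch of an equivalent programmatic setup via the public EvictionAttributes factory (the 10 MB limit, the lambda sizer, and the region name are illustrative assumptions):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.util.ObjectSizer;

public class LruMemorySizeSketch {

    public static void main(String[] args) {
        Cache cache = new CacheFactory().create();
        // Illustrative sizer; ObjectSizer.DEFAULT could be used instead of a custom one.
        ObjectSizer sizer = obj -> obj instanceof String ? ((String) obj).length() * 2 : 64;
        Region<Integer, String> region = cache
            .<Integer, String>createRegionFactory(RegionShortcut.REPLICATE)
            .setEvictionAttributes(
                // Roughly what an lru-memory-size declaration with a nested object sizer
                // resolves to once endLRUMemorySize has populated the EvictionAttributesImpl.
                EvictionAttributes.createLRUMemoryAttributes(10, sizer, EvictionAction.OVERFLOW_TO_DISK))
            .create("MemoryLimited");
        region.put(1, "value");
        cache.close();
    }
}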

Aggregations

EvictionAttributesImpl (org.apache.geode.internal.cache.EvictionAttributesImpl): 12
Test (org.junit.Test): 8
AttributesFactory (org.apache.geode.cache.AttributesFactory): 6
Cache (org.apache.geode.cache.Cache): 6
CacheException (org.apache.geode.cache.CacheException): 6
DiskStore (org.apache.geode.cache.DiskStore): 6
Region (org.apache.geode.cache.Region): 6
RegionFactory (org.apache.geode.cache.RegionFactory): 6
Index (org.apache.geode.cache.query.Index): 6
PortfolioData (org.apache.geode.cache.query.data.PortfolioData): 6
CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable): 6
AsyncInvocation (org.apache.geode.test.dunit.AsyncInvocation): 6
Host (org.apache.geode.test.dunit.Host): 6
VM (org.apache.geode.test.dunit.VM): 6
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 6
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory): 4
Query (org.apache.geode.cache.query.Query): 4
SelectResults (org.apache.geode.cache.query.SelectResults): 4
FlakyTest (org.apache.geode.test.junit.categories.FlakyTest): 4
IOException (java.io.IOException): 2