Search in sources :

Example 6 with PartitionAttributesFactory

use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.

In the class ConcurrentIndexOperationsOnOverflowRegionDUnitTest, the method testAsyncIndexInitDuringEntryDestroyAndQueryOnRR:

// GEODE-1828
@Category(FlakyTest.class)
@Test
public void testAsyncIndexInitDuringEntryDestroyAndQueryOnRR() {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    hooked = false;
    name = "PartionedPortfoliosPR";
    // Create Overflow Persistent Partition Region
    vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            Region partitionRegion = null;
            IndexManager.testHook = null;
            try {
                DiskStore ds = cache.findDiskStore("disk");
                if (ds == null) {
                    ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
                }
                AttributesFactory attr = new AttributesFactory();
                attr.setValueConstraint(PortfolioData.class);
                attr.setIndexMaintenanceSynchronous(true);
                EvictionAttributesImpl evicAttr = new EvictionAttributesImpl().setAction(EvictionAction.OVERFLOW_TO_DISK);
                evicAttr.setAlgorithm(EvictionAlgorithm.LRU_ENTRY).setMaximum(1);
                attr.setEvictionAttributes(evicAttr);
                attr.setDataPolicy(DataPolicy.REPLICATE);
                // attr.setPartitionAttributes(new
                // PartitionAttributesFactory().setTotalNumBuckets(1).create());
                attr.setDiskStoreName("disk");
                RegionFactory regionFactory = cache.createRegionFactory(attr.create());
                partitionRegion = regionFactory.create(name);
            } catch (IllegalStateException ex) {
                LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
            }
            assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
            assertNotNull("Region ref null", partitionRegion);
            assertTrue("Region ref claims to be destroyed", !partitionRegion.isDestroyed());
            // Create Indexes
            try {
                Index index = cache.getQueryService().createIndex("statusIndex", "p.ID", "/" + name + " p");
                assertNotNull(index);
            } catch (Exception e1) {
                e1.printStackTrace();
                fail("Index creation failed");
            }
        }
    });
    // Start changing the value in Region which should turn into a deadlock if the fix is not there
    AsyncInvocation asyncInv1 = vm0.invokeAsync(new CacheSerializableRunnable("Change value in region") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            // Do a put in region.
            Region r = getCache().getRegion(name);
            for (int i = 0; i < 100; i++) {
                r.put(i, new PortfolioData(i));
            }
            assertNull(IndexManager.testHook);
            IndexManager.testHook = new IndexManagerTestHook();
            // Destroy one of the values.
            getCache().getLogger().fine("Destroying the value");
            r.destroy(1);
            IndexManager.testHook = null;
        }
    });
    AsyncInvocation asyncInv2 = vm0.invokeAsync(new CacheSerializableRunnable("Run query on region") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            Query statusQuery = getCache().getQueryService().newQuery("select * from /" + name + " p where p.ID > -1");
            while (!hooked) {
                Wait.pause(100);
            }
            try {
                getCache().getLogger().fine("Querying the region");
                SelectResults results = (SelectResults) statusQuery.execute();
                assertEquals(100, results.size());
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    });
    // If we take more than 30 seconds then its a deadlock.
    ThreadUtils.join(asyncInv2, 30 * 1000);
    ThreadUtils.join(asyncInv1, 30 * 1000);
}
Also used : Query(org.apache.geode.cache.query.Query) CacheException(org.apache.geode.cache.CacheException) Host(org.apache.geode.test.dunit.Host) Index(org.apache.geode.cache.query.Index) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) CacheException(org.apache.geode.cache.CacheException) DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) SelectResults(org.apache.geode.cache.query.SelectResults) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) RegionFactory(org.apache.geode.cache.RegionFactory) EvictionAttributesImpl(org.apache.geode.internal.cache.EvictionAttributesImpl) VM(org.apache.geode.test.dunit.VM) Region(org.apache.geode.cache.Region) PortfolioData(org.apache.geode.cache.query.data.PortfolioData) Cache(org.apache.geode.cache.Cache) Category(org.junit.experimental.categories.Category) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)

Example 7 with PartitionAttributesFactory

use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.

In the class ConcurrentIndexOperationsOnOverflowRegionDUnitTest, the method testAsyncIndexInitDuringEntryDestroyAndQueryOnOnNonOverflowPR:

/**
 * Verifies that a concurrent entry destroy (paused inside index maintenance by
 * {@code IndexManagerNoWaitTestHook}) and a query on a non-overflow partitioned
 * region do not deadlock.
 */
@Test
public void testAsyncIndexInitDuringEntryDestroyAndQueryOnOnNonOverflowPR() {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    hooked = false;
    name = "PartionedPortfoliosPR";
    // Create a plain (non-overflow) partitioned region with synchronous index maintenance.
    vm0.invoke(new CacheSerializableRunnable("Create local region with synchronous index maintenance") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            Region partitionRegion = null;
            // Clear any test hook left behind by a previous test.
            IndexManager.testHook = null;
            try {
                AttributesFactory attr = new AttributesFactory();
                attr.setValueConstraint(PortfolioData.class);
                attr.setIndexMaintenanceSynchronous(true);
                attr.setDataPolicy(DataPolicy.PARTITION);
                // A single bucket keeps all entries (and the race) on one bucket region.
                attr.setPartitionAttributes(new PartitionAttributesFactory().setTotalNumBuckets(1).create());
                RegionFactory regionFactory = cache.createRegionFactory(attr.create());
                partitionRegion = regionFactory.create(name);
            } catch (IllegalStateException ex) {
                LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
            }
            assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
            assertNotNull("Region ref null", partitionRegion);
            assertTrue("Region ref claims to be destroyed", !partitionRegion.isDestroyed());
            // Create the index whose maintenance will race with the destroy below.
            try {
                Index index = cache.getQueryService().createIndex("statusIndex", "p.ID", "/" + name + " p");
                assertNotNull(index);
            } catch (Exception e1) {
                e1.printStackTrace();
                fail("Index creation failed");
            }
        }
    });
    // Start changing the value in Region which should turn into a deadlock if the fix is not there
    AsyncInvocation asyncInv1 = vm0.invokeAsync(new CacheSerializableRunnable("Change value in region") {

        @Override
        public void run2() throws CacheException {
            // Populate the region so the query below expects 100 results.
            Region r = getCache().getRegion(name);
            for (int i = 0; i < 100; i++) {
                r.put(i, new PortfolioData(i));
            }
            assertNull(IndexManager.testHook);
            // Install the no-wait hook so the destroy pauses inside index maintenance.
            IndexManager.testHook = new IndexManagerNoWaitTestHook();
            // Destroy one of the values.
            getCache().getLogger().fine("Destroying the value");
            r.destroy(1);
            IndexManager.testHook = null;
        }
    });
    AsyncInvocation asyncInv2 = vm0.invokeAsync(new CacheSerializableRunnable("Run query on region") {

        @Override
        public void run2() throws CacheException {
            Query statusQuery = getCache().getQueryService().newQuery("select * from /" + name + " p where p.ID > -1");
            // Wait until the destroy thread is parked inside the index hook.
            while (!hooked) {
                Wait.pause(10);
            }
            try {
                getCache().getLogger().fine("Querying the region");
                SelectResults results = (SelectResults) statusQuery.execute();
                assertEquals(100, results.size());
            } catch (Exception e) {
                // Fail loudly instead of swallowing the exception, which would
                // otherwise let the test pass even though the query broke.
                e.printStackTrace();
                fail("Query execution failed");
            }
        }
    });
    // If we take more than 30 seconds then it's a deadlock.
    ThreadUtils.join(asyncInv2, 30 * 1000);
    ThreadUtils.join(asyncInv1, 30 * 1000);
}
Also used : Query(org.apache.geode.cache.query.Query) CacheException(org.apache.geode.cache.CacheException) Host(org.apache.geode.test.dunit.Host) Index(org.apache.geode.cache.query.Index) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) CacheException(org.apache.geode.cache.CacheException) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) SelectResults(org.apache.geode.cache.query.SelectResults) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) RegionFactory(org.apache.geode.cache.RegionFactory) VM(org.apache.geode.test.dunit.VM) Region(org.apache.geode.cache.Region) PortfolioData(org.apache.geode.cache.query.data.PortfolioData) Cache(org.apache.geode.cache.Cache) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)

Example 8 with PartitionAttributesFactory

use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.

In the class PRQueryDUnitHelper, the method getCacheSerializableRunnableForCacheClose:

/**
 * This function <br>
 * 1. calls the cache.close on the VM <br>
 * 2. creates the cache again &amp; also the PR <br>
 *
 * @param regionName name of the partitioned region to recreate
 * @param redundancy number of redundant copies for the recreated PR
 * @param constraint value constraint class for the recreated region
 * @return cacheSerializable object
 *
 *         NOTE: Closing of the cache must be done from the test case rather than in
 *         PRQueryDUintHelper
 *
 */
public CacheSerializableRunnable getCacheSerializableRunnableForCacheClose(final String regionName, final int redundancy, final Class constraint) {
    return new CacheSerializableRunnable("cacheClose") {

        @Override
        public void run2() throws CacheException {
            final String expectedCacheClosedException = CacheClosedException.class.getName();
            final String expectedReplyException = ReplyException.class.getName();
            // These exceptions are expected while the cache is being recreated;
            // register them so the log scanner does not flag them as failures.
            getCache().getLogger().info("<ExpectedException action=add>" + expectedCacheClosedException + "</ExpectedException>");
            getCache().getLogger().info("<ExpectedException action=add>" + expectedReplyException + "</ExpectedException>");
            Cache cache = getCache();
            org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PROperationWithQueryDUnitTest#getCacheSerializableRunnableForCacheClose: Recreating the cache ");
            AttributesFactory attr = new AttributesFactory();
            attr.setValueConstraint(constraint);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            PartitionAttributes prAttr = paf.setRedundantCopies(redundancy).create();
            attr.setPartitionAttributes(prAttr);
            // Block until redundancy recovery for the recreated PR has finished.
            final CountDownLatch cdl = new CountDownLatch(1);
            ResourceObserverAdapter observer = new InternalResourceManager.ResourceObserverAdapter() {

                @Override
                public void recoveryFinished(Region region) {
                    cdl.countDown();
                }
            };
            InternalResourceManager.setResourceObserver(observer);
            try {
                cache.createRegion(regionName, attr.create());
                // Wait for recovery to finish
                cdl.await();
            } catch (InterruptedException e) {
                // Restore the interrupt status before failing so callers can observe it.
                Thread.currentThread().interrupt();
                Assert.fail("interrupted", e);
            } finally {
                InternalResourceManager.setResourceObserver(null);
            }
            org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PROperationWithQueryDUnitTest#getCacheSerializableRunnableForCacheClose: cache Recreated on VM ");
            getCache().getLogger().info("<ExpectedException action=remove>" + expectedReplyException + "</ExpectedException>");
            getCache().getLogger().info("<ExpectedException action=remove>" + expectedCacheClosedException + "</ExpectedException>");
        }
    };
}
Also used : PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) LocalRegion(org.apache.geode.internal.cache.LocalRegion) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) CountDownLatch(java.util.concurrent.CountDownLatch) ResourceObserverAdapter(org.apache.geode.internal.cache.control.InternalResourceManager.ResourceObserverAdapter) Cache(org.apache.geode.cache.Cache)

Example 9 with PartitionAttributesFactory

use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.

In the class PRQueryDUnitTest, the method testQueryResultsFromMembers:

/**
 * Verifies that for LIMIT queries each datastore member returns at most the
 * limit number of results, by intercepting the per-member result sets via a
 * {@link PartitionedRegionQueryEvaluator.TestHook}.
 */
@Test
public void testQueryResultsFromMembers() throws Exception {
    final String rName = getUniqueName();
    Host host = Host.getHost(0);
    final VM datastore1 = host.getVM(2);
    final VM datastore2 = host.getVM(3);
    final int totalBuckets = 10;
    final int redCop = 0;
    CacheSerializableRunnable createPR = new CacheSerializableRunnable("Create PR") {

        @Override
        public void run2() throws CacheException {
            AttributesFactory attr = new AttributesFactory();
            PartitionAttributes prAttr = new PartitionAttributesFactory().setRedundantCopies(redCop).setTotalNumBuckets(totalBuckets).create();
            attr.setPartitionAttributes(prAttr);
            getCache().createRegion(rName, attr.create());
        }
    };
    datastore1.invoke(createPR);
    datastore2.invoke(createPR);
    // Create a matching PR locally so this VM can run the query evaluator.
    AttributesFactory attr = new AttributesFactory();
    PartitionAttributes prAttr = new PartitionAttributesFactory().setRedundantCopies(redCop).setTotalNumBuckets(totalBuckets).create();
    attr.setPartitionAttributes(prAttr);
    PartitionedRegion pr = (PartitionedRegion) getCache().createRegion(rName, attr.create());
    // Populate the region (autoboxing replaces deprecated new Integer(i)).
    int numEntries = 100;
    for (int i = 1; i <= numEntries; i++) {
        pr.put(Integer.valueOf(i), new Portfolio(i));
    }
    int[] limit = new int[] { 10, 15, 30, 0, 1, 9 };
    String[] queries = new String[] { "select * from " + pr.getFullPath() + " LIMIT " + limit[0], "select * from " + pr.getFullPath() + " LIMIT " + limit[1], "select * from " + pr.getFullPath() + " LIMIT " + limit[2], "select * from " + pr.getFullPath() + " LIMIT " + limit[3], "select * from " + pr.getFullPath() + " LIMIT " + limit[4], "select * from " + pr.getFullPath() + " where ID > 10 LIMIT " + limit[5] };
    try {
        for (int q = 0; q < queries.length; q++) {
            Object[] params = new Object[0];
            final DefaultQuery query = (DefaultQuery) getCache().getQueryService().newQuery(queries[q]);
            final SelectResults results = query.getSimpleSelect().getEmptyResultSet(params, getCache(), query);
            // TODO assert this is the correct set of bucket Ids,
            final HashSet<Integer> buckets = new HashSet<Integer>();
            for (int i = 0; i < totalBuckets; i++) {
                buckets.add(Integer.valueOf(i));
            }
            final PartitionedRegionQueryEvaluator qe = new PartitionedRegionQueryEvaluator(pr.getSystem(), pr, query, params, results, buckets);
            // Records, per member, the total number of results that member returned.
            class MyTestHook implements PartitionedRegionQueryEvaluator.TestHook {

                public HashMap<Object, Integer> resultsPerMember = new HashMap<Object, Integer>();

                @Override
                public void hook(int spot) throws RuntimeException {
                    // Spot 3 fires once the per-member result collections are available.
                    if (spot == 3) {
                        for (Object mr : qe.getResultsPerMember().entrySet()) {
                            Map.Entry e = (Map.Entry) mr;
                            Collection<Collection> memberResults = (Collection<Collection>) e.getValue();
                            for (Collection<Object> r : memberResults) {
                                this.resultsPerMember.merge(e.getKey(), Integer.valueOf(r.size()), Integer::sum);
                            }
                        }
                    }
                }
            }
            final MyTestHook th = new MyTestHook();
            qe.queryBuckets(th);
            for (Map.Entry<Object, Integer> e : th.resultsPerMember.entrySet()) {
                Integer res = e.getValue();
                LogWriterUtils.getLogWriter().info("PRQueryDUnitTest#testQueryResultsFromMembers : \n" + "Query [" + queries[q] + "] Member : " + e.getKey() + " results size :" + res.intValue());
                assertEquals("Query [" + queries[q] + "]: The results returned by the member does not match the query limit size : Member : " + e.getKey(), limit[q], res.intValue());
            }
        }
    } finally {
        getCache().close();
    }
}
Also used : HashMap(java.util.HashMap) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) SelectResults(org.apache.geode.cache.query.SelectResults) HashSet(java.util.HashSet) DefaultQuery(org.apache.geode.cache.query.internal.DefaultQuery) Portfolio(org.apache.geode.cache.query.data.Portfolio) Host(org.apache.geode.test.dunit.Host) PartitionedRegionQueryEvaluator(org.apache.geode.internal.cache.PartitionedRegionQueryEvaluator) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) VM(org.apache.geode.test.dunit.VM) Collection(java.util.Collection) HashMap(java.util.HashMap) Map(java.util.Map) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)

Example 10 with PartitionAttributesFactory

use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.

In the class PRQueryDUnitHelper, the method getCacheSerializableRunnableForColocatedChildCreate:

/**
 * This function creates the colocated child region of a colocated pair of PR's, given the
 * redundancy and persistence parameters. Creation of the parent region is intentionally
 * skipped; the child is colocated with the (pre-existing) parent by name.
 *
 * @param regionName name of the parent region; the child is named regionName + "Child"
 * @param redundancy number of redundant copies for the child PR
 * @param constraint value constraint class for the child region
 * @param isPersistent whether the child PR should persist to the "disk" disk store
 * @return cacheSerializable object
 */
public CacheSerializableRunnable getCacheSerializableRunnableForColocatedChildCreate(final String regionName, final int redundancy, final Class constraint, boolean isPersistent) {
    final String childRegionName = regionName + "Child";
    final String diskName = "disk";
    return new CacheSerializableRunnable(regionName + "-ChildRegion") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            AttributesFactory attr = new AttributesFactory();
            attr.setValueConstraint(constraint);
            if (isPersistent) {
                // Reuse the shared disk store if another region already created it.
                DiskStore ds = cache.findDiskStore(diskName);
                if (ds == null) {
                    ds = cache.createDiskStoreFactory().setDiskDirs(org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase.getDiskDirs()).create(diskName);
                }
                attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
                attr.setDiskStoreName(diskName);
            } else {
                attr.setDataPolicy(DataPolicy.PARTITION);
                attr.setDiskStoreName(null);
            }
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy);
            // skip parent region creation
            // partitionedregion = cache.createRegion(regionName, attr.create());
            // Colocate the child with the parent region.
            paf.setColocatedWith(regionName);
            attr.setPartitionAttributes(paf.create());
            cache.createRegion(childRegionName, attr.create());
        }
    };
}
Also used : DiskStore(org.apache.geode.cache.DiskStore) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) CacheException(org.apache.geode.cache.CacheException) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) LocalRegion(org.apache.geode.internal.cache.LocalRegion) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) Cache(org.apache.geode.cache.Cache)

Aggregations

PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory)340 AttributesFactory (org.apache.geode.cache.AttributesFactory)289 Region (org.apache.geode.cache.Region)173 Test (org.junit.Test)154 Cache (org.apache.geode.cache.Cache)136 PartitionAttributes (org.apache.geode.cache.PartitionAttributes)116 PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion)112 DistributedTest (org.apache.geode.test.junit.categories.DistributedTest)110 VM (org.apache.geode.test.dunit.VM)101 Host (org.apache.geode.test.dunit.Host)99 SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable)95 CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable)75 CacheException (org.apache.geode.cache.CacheException)58 LocalRegion (org.apache.geode.internal.cache.LocalRegion)48 SerializableCallable (org.apache.geode.test.dunit.SerializableCallable)47 IOException (java.io.IOException)42 FlakyTest (org.apache.geode.test.junit.categories.FlakyTest)42 DiskStore (org.apache.geode.cache.DiskStore)41 RegionAttributes (org.apache.geode.cache.RegionAttributes)41 BucketRegion (org.apache.geode.internal.cache.BucketRegion)35