Search in sources :

Example 96 with CacheSerializableRunnable

use of org.apache.geode.cache30.CacheSerializableRunnable in project geode by apache.

the class PartitionedRegionSingleHopDUnitTest method testMetadataIsSameOnAllServersAndClientsHA.

/**
 * Verifies that single-hop client metadata stays consistent across servers and the
 * client, and that after one server is stopped the client metadata converges to a
 * single surviving location per bucket (HA recovery).
 */
@Test
public void testMetadataIsSameOnAllServersAndClientsHA() {
    // Start two servers, each hosting the PR with redundancy 2 and 4 total buckets.
    Integer port0 = (Integer) member0.invoke(() -> PartitionedRegionSingleHopDUnitTest.createServer(2, 4));
    Integer port1 = (Integer) member1.invoke(() -> PartitionedRegionSingleHopDUnitTest.createServer(2, 4));
    createClient(port0, port1, port0, port1);
    put();
    ClientMetadataService cms = ((GemFireCacheImpl) cache).getClientMetadataService();
    cms.getClientPRMetadata((LocalRegion) region);
    final Map<String, ClientPartitionAdvisor> regionMetaData = cms.getClientPRMetadata_TEST_ONLY();
    // Metadata arrives asynchronously; wait until the single region entry shows up.
    Awaitility.waitAtMost(60, TimeUnit.SECONDS).until(() -> (regionMetaData.size() == 1));
    assertTrue(regionMetaData.containsKey(region.getFullPath()));
    // Both servers run the identical profile fetch, so share one runnable instance.
    CacheSerializableRunnable fetchServerBucketProfiles = new CacheSerializableRunnable("aba") {

        @Override
        public void run2() throws CacheException {
            final PartitionedRegion pr = (PartitionedRegion) region;
            // Pull the server-side client bucket profiles to confirm they are available.
            ConcurrentHashMap<Integer, Set<ServerBucketProfile>> serverMap =
                pr.getRegionAdvisor().getAllClientBucketProfilesTest();
            assertNotNull(serverMap);
        }
    };
    member0.invoke(fetchServerBucketProfiles);
    member1.invoke(fetchServerBucketProfiles);
    ClientPartitionAdvisor prMetaData = regionMetaData.get(region.getFullPath());
    final Map<Integer, List<BucketServerLocation66>> clientMap = prMetaData.getBucketServerLocationsMap_TEST_ONLY();
    // All 4 buckets must become known to the client.
    Awaitility.waitAtMost(60, TimeUnit.SECONDS).until(() -> (clientMap.size() == 4));
    // With both servers up and redundancy 2, each bucket has exactly 2 locations.
    for (Entry<Integer, List<BucketServerLocation66>> entry : clientMap.entrySet()) {
        assertEquals(2, entry.getValue().size());
    }
    member0.invoke(() -> PartitionedRegionSingleHopDUnitTest.verifyMetadata(clientMap));
    member1.invoke(() -> PartitionedRegionSingleHopDUnitTest.verifyMetadata(clientMap));
    // Kill one server; subsequent puts should trigger a client metadata refresh.
    member0.invoke(() -> PartitionedRegionSingleHopDUnitTest.stopServer());
    put();
    cms = ((GemFireCacheImpl) cache).getClientMetadataService();
    cms.getClientPRMetadata((LocalRegion) region);
    assertEquals(1, regionMetaData.size());
    assertTrue(regionMetaData.containsKey(region.getFullPath()));
    assertEquals(4, /* numBuckets */ clientMap.size());
    // The refresh is asynchronous: poll until every bucket is down to the single
    // surviving location instead of asserting eagerly (which was flaky).
    Awaitility.waitAtMost(60, TimeUnit.SECONDS).until(() -> {
        boolean finished = true;
        for (Entry<Integer, List<BucketServerLocation66>> entry : clientMap.entrySet()) {
            List<BucketServerLocation66> locations = entry.getValue();
            if (locations.size() != 1) {
                finished = false;
                System.out.println("bucket copies are not created, the locations size for bucket id : " + entry.getKey() + " size : " + locations.size() + " the list is " + locations);
            }
        }
        return finished;
    });
}
Also used : CacheException(org.apache.geode.cache.CacheException) ClientMetadataService(org.apache.geode.cache.client.internal.ClientMetadataService) ArrayList(java.util.ArrayList) ServerBucketProfile(org.apache.geode.internal.cache.BucketAdvisor.ServerBucketProfile) Entry(java.util.Map.Entry) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) ClientPartitionAdvisor(org.apache.geode.cache.client.internal.ClientPartitionAdvisor) List(java.util.List) ArrayList(java.util.ArrayList) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) ClientServerTest(org.apache.geode.test.junit.categories.ClientServerTest) Test(org.junit.Test)

Example 97 with CacheSerializableRunnable

use of org.apache.geode.cache30.CacheSerializableRunnable in project geode by apache.

the class PRColocationDUnitTest method testColocatedPRWithDestroy.

/**
 * Creates colocated Customer/Order partitioned regions, then verifies that the
 * parent of a colocation chain cannot be destroyed while its child still exists,
 * and that destroying the child first and then the parent succeeds.
 */
@Test
public void testColocatedPRWithDestroy() throws Throwable {
    createCacheInAllVms();
    // Boxed constructors are deprecated; use valueOf/constants instead.
    redundancy = Integer.valueOf(0);
    localMaxmemory = Integer.valueOf(50);
    totalNumBuckets = Integer.valueOf(11);
    try {
        // Create Customer PartitionedRegion in all VMs.
        regionName = CustomerPartitionedRegionName;
        colocatedWith = null;
        isPartitionResolver = Boolean.FALSE;
        attributeObjects = new Object[] { regionName, redundancy, localMaxmemory, totalNumBuckets, colocatedWith, isPartitionResolver };
        createPartitionedRegion(attributeObjects);
        // Create Order PartitionedRegion (colocated with Customer) in all VMs.
        regionName = OrderPartitionedRegionName;
        colocatedWith = CustomerPartitionedRegionName;
        isPartitionResolver = Boolean.FALSE;
        attributeObjects = new Object[] { regionName, redundancy, localMaxmemory, totalNumBuckets, colocatedWith, isPartitionResolver };
        createPartitionedRegion(attributeObjects);
    } catch (Exception expected) {
        // Region creation is only allowed to fail with IllegalStateException here.
        assertTrue(expected instanceof IllegalStateException);
    }
    // Put the customer 1-10 in CustomerPartitionedRegion
    accessor.invoke(() -> PRColocationDUnitTest.putCustomerPartitionedRegion(CustomerPartitionedRegionName));
    // Put the order 1-10 for each Customer in OrderPartitionedRegion
    accessor.invoke(() -> PRColocationDUnitTest.putOrderPartitionedRegion(OrderPartitionedRegionName));
    // Suppress the expected destroy-failure message in the logs.
    final String expectedExMessage = "colocation chain cannot be destroyed, " + "unless all its children";
    final IgnoredException ex = IgnoredException.addIgnoredException(expectedExMessage, dataStore1);
    dataStore1.invoke(new CacheSerializableRunnable("PR with destroy") {

        @Override
        public void run2() {
            Region partitionedregion = basicGetCache().getRegion(Region.SEPARATOR + CustomerPartitionedRegionName);
            try {
                // Destroying the parent while the child exists must fail.
                partitionedregion.destroyRegion();
                fail("It should have thrown an Exception saying: " + expectedExMessage);
            } catch (IllegalStateException expected) {
                LogWriterUtils.getLogWriter().info("Got message: " + expected.getMessage());
                assertTrue(expected.getMessage().contains(expectedExMessage));
            }
        }
    });
    ex.remove();
    dataStore1.invoke(new CacheSerializableRunnable("PR with destroy") {

        @Override
        public void run2() {
            Region partitionedregion = basicGetCache().getRegion(Region.SEPARATOR + OrderPartitionedRegionName);
            try {
                // Destroying the child (leaf of the chain) must succeed.
                partitionedregion.destroyRegion();
            } catch (Exception unexpected) {
                unexpected.printStackTrace();
                LogWriterUtils.getLogWriter().info("Unexpected Message: " + unexpected.getMessage());
                fail("Could not destroy the child region.");
            }
        }
    });
    dataStore1.invoke(new CacheSerializableRunnable("PR with destroy") {

        @Override
        public void run2() {
            Region partitionedregion = basicGetCache().getRegion(Region.SEPARATOR + CustomerPartitionedRegionName);
            try {
                // With the child gone, destroying the parent must now succeed.
                partitionedregion.destroyRegion();
            } catch (Exception unexpected) {
                unexpected.printStackTrace();
                LogWriterUtils.getLogWriter().info("Unexpected Message: " + unexpected.getMessage());
                fail("Could not destroy the parent region.");
            }
        }
    });
}
Also used : CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) IgnoredException(org.apache.geode.test.dunit.IgnoredException) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) IgnoredException(org.apache.geode.test.dunit.IgnoredException) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test)

Example 98 with CacheSerializableRunnable

use of org.apache.geode.cache30.CacheSerializableRunnable in project geode by apache.

the class PRColocationDUnitTest method testColocatedPRWithAccessorOnDifferentNode2.

/**
 * Verifies that creating a colocated child PR as a datastore on a node where the
 * parent PR is only an accessor (localMaxMemory == 0) is rejected with
 * "Colocated regions should have accessors at the same node".
 */
@Test
public void testColocatedPRWithAccessorOnDifferentNode2() throws Throwable {
    createCacheInAllVms();
    dataStore1.invoke(new CacheSerializableRunnable("testColocatedPRWithAccessorOnDifferentNode2") {

        @Override
        public void run2() {
            String partitionedRegionName = CustomerPartitionedRegionName;
            colocatedWith = null;
            // Boxed constructors are deprecated; use valueOf/constants instead.
            isPartitionResolver = Boolean.FALSE;
            redundancy = Integer.valueOf(0);
            // localMaxMemory 0 makes this node a pure accessor for the parent PR.
            localMaxmemory = Integer.valueOf(0);
            totalNumBuckets = Integer.valueOf(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
            assertNotNull(pr);
            LogWriterUtils.getLogWriter().info("Partitioned Region " + partitionedRegionName + " created Successfully :" + pr.toString());
        }
    });
    // Suppress the expected creation-failure message in the logs.
    final IgnoredException ex = IgnoredException.addIgnoredException("Colocated regions should have accessors at the same node", dataStore1);
    dataStore1.invoke(new CacheSerializableRunnable("Colocated PR with accessor on different nodes") {

        @Override
        public void run2() {
            regionName = OrderPartitionedRegionName;
            colocatedWith = CustomerPartitionedRegionName;
            isPartitionResolver = Boolean.FALSE;
            redundancy = Integer.valueOf(0);
            // Child is a datastore (localMaxMemory 50) while the parent here is an accessor.
            localMaxmemory = Integer.valueOf(50);
            totalNumBuckets = Integer.valueOf(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            try {
                basicGetCache().createRegion(regionName, attr.create());
                fail("It should have failed with Exception: Colocated regions " + "should have accessors at the same node");
            } catch (Exception expected) {
                LogWriterUtils.getLogWriter().info("Expected Message : " + expected.getMessage());
                assertTrue(expected.getMessage().startsWith("Colocated regions should have accessors at the same node"));
            }
        }
    });
    ex.remove();
}
Also used : PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) IgnoredException(org.apache.geode.test.dunit.IgnoredException) IgnoredException(org.apache.geode.test.dunit.IgnoredException) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test)

Example 99 with CacheSerializableRunnable

use of org.apache.geode.cache30.CacheSerializableRunnable in project geode by apache.

the class PRColocationDUnitTest method testColocatedPRWithPROnDifferentNode1.

/**
 * Verifies that a colocated child PR created on only one of the parent's hosting
 * nodes cannot create buckets ("Cannot create buckets, as colocated regions are
 * not configured to be at the same nodes."), while creating the child on the
 * remaining parent node makes puts succeed.
 */
@Test
public void testColocatedPRWithPROnDifferentNode1() throws Throwable {
    createCacheInAllVms();
    dataStore1.invoke(new CacheSerializableRunnable("TestColocatedPRWithPROnDifferentNode") {

        @Override
        public void run2() {
            String partitionedRegionName = CustomerPartitionedRegionName;
            colocatedWith = null;
            // Boxed constructors are deprecated; use valueOf/constants instead.
            isPartitionResolver = Boolean.FALSE;
            redundancy = Integer.valueOf(0);
            localMaxmemory = Integer.valueOf(20);
            totalNumBuckets = Integer.valueOf(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
            assertNotNull(pr);
            LogWriterUtils.getLogWriter().info("Partitioned Region " + partitionedRegionName + " created Successfully :" + pr.toString());
        }
    });
    dataStore2.invoke(new CacheSerializableRunnable("testColocatedPRwithPROnDifferentNode") {

        @Override
        public void run2() {
            String partitionedRegionName = CustomerPartitionedRegionName;
            colocatedWith = null;
            isPartitionResolver = Boolean.FALSE;
            redundancy = Integer.valueOf(0);
            localMaxmemory = Integer.valueOf(20);
            totalNumBuckets = Integer.valueOf(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
            assertNotNull(pr);
            LogWriterUtils.getLogWriter().info("Partitioned Region " + partitionedRegionName + " created Successfully :" + pr.toString());
        }
    });
    // Suppress the expected bucket-creation failure message in the logs.
    final IgnoredException ex = IgnoredException.addIgnoredException("Cannot create buckets", dataStore2);
    dataStore2.invoke(new CacheSerializableRunnable("Colocated PR with PR on different node") {

        @Override
        public void run2() {
            regionName = OrderPartitionedRegionName;
            colocatedWith = CustomerPartitionedRegionName;
            isPartitionResolver = Boolean.FALSE;
            redundancy = Integer.valueOf(0);
            localMaxmemory = Integer.valueOf(50);
            totalNumBuckets = Integer.valueOf(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            try {
                // Region creation succeeds; the failure only surfaces when a put
                // forces bucket creation.
                Region r = basicGetCache().createRegion(regionName, attr.create());
                r.put("key", "value");
                fail("Failed because we did not receive the exception - : Cannot create buckets, " + "as colocated regions are not configured to be at the same nodes.");
            } catch (Exception expected) {
                LogWriterUtils.getLogWriter().info("Expected Message : " + expected.getMessage());
                assertTrue(expected.getMessage().contains("Cannot create buckets, as " + "colocated regions are not configured to be at the same nodes."));
            }
        }
    });
    ex.remove();
    dataStore1.invoke(new CacheSerializableRunnable("Colocated PR with PR on different node") {

        @Override
        public void run2() {
            regionName = OrderPartitionedRegionName;
            colocatedWith = CustomerPartitionedRegionName;
            isPartitionResolver = Boolean.FALSE;
            redundancy = Integer.valueOf(0);
            localMaxmemory = Integer.valueOf(50);
            totalNumBuckets = Integer.valueOf(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            try {
                // Both PRs now exist on this node, so the put must succeed.
                Region r = basicGetCache().createRegion(regionName, attr.create());
                r.put("key", "value");
                assertEquals("value", (String) r.get("key"));
            } catch (Exception notExpected) {
                notExpected.printStackTrace();
                LogWriterUtils.getLogWriter().info("Unexpected Exception Message : " + notExpected.getMessage());
                Assert.fail("Unexpected Exception", notExpected);
            }
        }
    });
}
Also used : PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) IgnoredException(org.apache.geode.test.dunit.IgnoredException) IgnoredException(org.apache.geode.test.dunit.IgnoredException) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test)

Example 100 with CacheSerializableRunnable

use of org.apache.geode.cache30.CacheSerializableRunnable in project geode by apache.

the class PRColocationDUnitTest method testColocatedPRWithAccessorOnDifferentNode1.

/**
 * Verifies that creating a colocated child PR as a pure accessor
 * (localMaxMemory == 0) on a node where the parent PR is a datastore is rejected
 * with "Colocated regions should have accessors at the same node".
 */
@Test
public void testColocatedPRWithAccessorOnDifferentNode1() throws Throwable {
    createCacheInAllVms();
    dataStore1.invoke(new CacheSerializableRunnable("testColocatedPRwithAccessorOnDifferentNode") {

        @Override
        public void run2() {
            String partitionedRegionName = CustomerPartitionedRegionName;
            colocatedWith = null;
            // Boxed constructors are deprecated; use valueOf/constants instead.
            isPartitionResolver = Boolean.FALSE;
            redundancy = Integer.valueOf(0);
            localMaxmemory = Integer.valueOf(50);
            totalNumBuckets = Integer.valueOf(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
            assertNotNull(pr);
            LogWriterUtils.getLogWriter().info("Partitioned Region " + partitionedRegionName + " created Successfully :" + pr.toString());
        }
    });
    // Suppress the expected creation-failure message in the logs.
    final IgnoredException ex = IgnoredException.addIgnoredException("Colocated regions should have accessors at the same node", dataStore1);
    dataStore1.invoke(new CacheSerializableRunnable("Colocated PR with Accessor on different nodes") {

        @Override
        public void run2() {
            regionName = OrderPartitionedRegionName;
            colocatedWith = CustomerPartitionedRegionName;
            isPartitionResolver = Boolean.FALSE;
            // Child is an accessor here (localMaxMemory 0) while the parent is a datastore.
            localMaxmemory = Integer.valueOf(0);
            redundancy = Integer.valueOf(0);
            totalNumBuckets = Integer.valueOf(11);
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy.intValue()).setLocalMaxMemory(localMaxmemory.intValue()).setTotalNumBuckets(totalNumBuckets.intValue()).setColocatedWith(colocatedWith);
            if (isPartitionResolver.booleanValue()) {
                paf.setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
            }
            PartitionAttributes prAttr = paf.create();
            AttributesFactory attr = new AttributesFactory();
            attr.setPartitionAttributes(prAttr);
            assertNotNull(basicGetCache());
            try {
                basicGetCache().createRegion(regionName, attr.create());
                fail("It should have failed with Exception: Colocated regions " + "should have accessors at the same node");
            } catch (Exception expected) {
                expected.printStackTrace();
                LogWriterUtils.getLogWriter().info("Expected Message : " + expected.getMessage());
                assertTrue(expected.getMessage().startsWith("Colocated regions should have accessors at the same node"));
            }
        }
    });
    ex.remove();
}
Also used : PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) IgnoredException(org.apache.geode.test.dunit.IgnoredException) IgnoredException(org.apache.geode.test.dunit.IgnoredException) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test)

Aggregations

CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable)595 CacheException (org.apache.geode.cache.CacheException)415 DistributedTest (org.apache.geode.test.junit.categories.DistributedTest)369 Test (org.junit.Test)369 Region (org.apache.geode.cache.Region)307 VM (org.apache.geode.test.dunit.VM)279 Host (org.apache.geode.test.dunit.Host)274 SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable)179 FlakyTest (org.apache.geode.test.junit.categories.FlakyTest)165 AttributesFactory (org.apache.geode.cache.AttributesFactory)145 IOException (java.io.IOException)135 Cache (org.apache.geode.cache.Cache)124 QueryService (org.apache.geode.cache.query.QueryService)118 PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory)107 LocalRegion (org.apache.geode.internal.cache.LocalRegion)106 SelectResults (org.apache.geode.cache.query.SelectResults)85 PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion)75 ClientServerTest (org.apache.geode.test.junit.categories.ClientServerTest)71 IgnoredException (org.apache.geode.test.dunit.IgnoredException)65 ClientSubscriptionTest (org.apache.geode.test.junit.categories.ClientSubscriptionTest)61