Search in sources:

Example 21 with GemFireCacheImpl

use of org.apache.geode.internal.cache.GemFireCacheImpl in project geode by apache.

From the class PdxStringQueryDUnitTest, method testPartitionRegionNoIndex.

/**
 * Exercises client/server OQL queries against a partitioned region populated with
 * PortfolioPdx objects. Verifies that:
 * <ul>
 *   <li>a functional index created on the PR materializes as a RangeIndex on every bucket,</li>
 *   <li>remote (client-to-server) and local query results match, including ordered queries,</li>
 *   <li>no {@code PdxString} instances leak into query results (checked via
 *       {@code checkForPdxString}), even after mixing in non-Pdx {@code Portfolio} entries
 *       and after enabling the cache's read-serialized flag on both server and client.</li>
 * </ul>
 */
@Test
public void testPartitionRegionNoIndex() throws CacheException {
    final Host host = Host.getHost(0);
    VM server0 = host.getVM(0);
    VM server1 = host.getVM(1);
    VM server2 = host.getVM(2);
    VM client = host.getVM(3);
    final int numberOfEntries = 10;
    final boolean isPr = true;
    // Start server1 and create index
    server0.invoke(new CacheSerializableRunnable("Create Server1") {

        @Override
        public void run2() throws CacheException {
            configAndStartBridgeServer(isPr, false, false);
            // create a local query service
            QueryService localQueryService = null;
            try {
                localQueryService = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            Index index = null;
            // On a PR the index is a PartitionedIndex wrapper; each bucket must host a RangeIndex.
            try {
                index = localQueryService.createIndex("secIdIndex", "pos.secIdIndexed", regName + " p, p.positions.values pos");
                if (index instanceof PartitionedIndex) {
                    for (Object o : ((PartitionedIndex) index).getBucketIndexes()) {
                        if (!(o instanceof RangeIndex)) {
                            // Report the offending bucket index type, not the wrapper's type.
                            fail("RangeIndex Index should have been created instead of " + o.getClass());
                        }
                    }
                } else {
                    fail("Partitioned index expected");
                }
            } catch (Exception ex) {
                // Preserve the full stack trace of the cause instead of only its message.
                Assert.fail("Failed to create index.", ex);
            }
        }
    });
    // Start server2 (the region itself is created inside configAndStartBridgeServer)
    server1.invoke(new CacheSerializableRunnable("Create Server2") {

        @Override
        public void run2() throws CacheException {
            configAndStartBridgeServer(isPr, false, false);
        }
    });
    // Start server3
    server2.invoke(new CacheSerializableRunnable("Create Server3") {

        @Override
        public void run2() throws CacheException {
            configAndStartBridgeServer(isPr, false, false);
        }
    });
    // Client pool.
    final int port0 = server0.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
    final int port1 = server1.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
    final int port2 = server2.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
    final String host0 = NetworkUtils.getServerHostName(server0.getHost());
    // Create client pool.
    final String poolName = "testClientServerQueryPool";
    createPool(client, poolName, new String[] { host0 }, new int[] { port0, port1, port2 }, true);
    // Create client region and put PortfolioPdx objects (PdxInstances)
    client.invoke(new CacheSerializableRunnable("Create client") {

        @Override
        public void run2() throws CacheException {
            AttributesFactory factory = new AttributesFactory();
            factory.setScope(Scope.LOCAL);
            ClientServerTestCase.configureConnectionPool(factory, host0, port1, -1, true, -1, -1, null);
            Region region = createRegion(regionName, rootRegionName, factory.create());
            LogWriterUtils.getLogWriter().info("Put PortfolioPdx");
            for (int i = 0; i < numberOfEntries; i++) {
                region.put("key-" + i, new PortfolioPdx(i));
            }
        }
    });
    // Execute queries from client to server and locally on client
    SerializableRunnable executeQueries = new CacheSerializableRunnable("Execute queries") {

        @Override
        public void run2() throws CacheException {
            QueryService remoteQueryService = null;
            QueryService localQueryService = null;
            // rs[0][0] holds the remote result, rs[0][1] the local result for the current query.
            SelectResults[][] rs = new SelectResults[1][2];
            SelectResults[] resWithoutIndexRemote = new SelectResults[queryString.length];
            SelectResults[] resWithIndexRemote = new SelectResults[queryString.length];
            SelectResults[] resWithoutIndexLocal = new SelectResults[queryString.length];
            SelectResults[] resWithIndexLocal = new SelectResults[queryString.length];
            try {
                remoteQueryService = (PoolManager.find(poolName)).getQueryService();
                localQueryService = getCache().getQueryService();
            } catch (Exception e) {
                Assert.fail("Failed to get QueryService.", e);
            }
            for (int i = 0; i < queryString.length; i++) {
                try {
                    LogWriterUtils.getLogWriter().info("### Executing Query on remote server:" + queryString[i]);
                    Query query = remoteQueryService.newQuery(queryString[i]);
                    rs[0][0] = (SelectResults) query.execute();
                    resWithoutIndexRemote[i] = rs[0][0];
                    LogWriterUtils.getLogWriter().info("RR remote no index size of resultset: " + rs[0][0].size() + " for query: " + queryString[i]);
                    checkForPdxString(rs[0][0].asList(), queryString[i]);
                    LogWriterUtils.getLogWriter().info("### Executing Query locally on client:" + queryString[i]);
                    query = localQueryService.newQuery(queryString[i]);
                    rs[0][1] = (SelectResults) query.execute();
                    resWithoutIndexLocal[i] = rs[0][1];
                    LogWriterUtils.getLogWriter().info("isPR: " + isPr + "  client local indexType:no index size of resultset: " + rs[0][1].size() + " for query: " + queryString[i]);
                    checkForPdxString(rs[0][1].asList(), queryString[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString[i], e);
                }
                try {
                    // to compare remote query results with and without index
                    LogWriterUtils.getLogWriter().info("### Executing Query on remote server for region2:" + queryString2[i]);
                    Query query = remoteQueryService.newQuery(queryString2[i]);
                    resWithIndexRemote[i] = (SelectResults) query.execute();
                    LogWriterUtils.getLogWriter().info("isPR: " + isPr + "  remote region2 size of resultset: " + resWithIndexRemote[i].size() + " for query: " + queryString2[i]);
                    checkForPdxString(resWithIndexRemote[i].asList(), queryString2[i]);
                    // to compare local query results with and without index
                    LogWriterUtils.getLogWriter().info("### Executing Query on local for region2:" + queryString2[i]);
                    query = localQueryService.newQuery(queryString2[i]);
                    resWithIndexLocal[i] = (SelectResults) query.execute();
                    LogWriterUtils.getLogWriter().info("isPR: " + isPr + "  local region2 size of resultset: " + resWithIndexLocal[i].size() + " for query: " + queryString2[i]);
                    checkForPdxString(resWithIndexLocal[i].asList(), queryString2[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString2[i], e);
                }
                if (i < orderByQueryIndex) {
                    // Compare local and remote query results.
                    if (!compareResultsOfWithAndWithoutIndex(rs)) {
                        LogWriterUtils.getLogWriter().info("result0=" + rs[0][0].asList());
                        LogWriterUtils.getLogWriter().info("result1=" + rs[0][1].asList());
                        fail("Local and Remote Query Results are not matching for query :" + queryString[i]);
                    }
                } else {
                    // compare the order of results returned
                    compareResultsOrder(rs, isPr);
                }
            }
            // Compare the remote results from the two query sets pairwise.
            for (int i = 0; i < queryString.length; i++) {
                rs[0][0] = resWithoutIndexRemote[i];
                rs[0][1] = resWithIndexRemote[i];
                if (i < orderByQueryIndex) {
                    // Compare local and remote query results.
                    if (!compareResultsOfWithAndWithoutIndex(rs)) {
                        fail("Results with and without index are not matching for query :" + queryString2[i]);
                    }
                } else {
                    // compare the order of results returned
                    compareResultsOrder(rs, isPr);
                }
            }
            // Same comparison for the locally-executed results.
            for (int i = 0; i < queryString.length; i++) {
                rs[0][0] = resWithoutIndexLocal[i];
                rs[0][1] = resWithIndexLocal[i];
                if (i < orderByQueryIndex) {
                    // Compare local and remote query results.
                    if (!compareResultsOfWithAndWithoutIndex(rs)) {
                        fail("Results with and without index are not matching for query :" + queryString2[i]);
                    }
                } else {
                    // compare the order of results returned
                    compareResultsOrder(rs, isPr);
                }
            }
        }
    };
    client.invoke(executeQueries);
    // Put Non Pdx objects on server execute queries locally
    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(regionName);
            LogWriterUtils.getLogWriter().info("Put Objects locally on server");
            for (int i = numberOfEntries; i < numberOfEntries * 2; i++) {
                region.put("key-" + i, new Portfolio(i));
            }
            QueryService localQueryService = getCache().getQueryService();
            // Query server1 locally to check if PdxString is not being returned
            for (int i = 0; i < queryString.length; i++) {
                try {
                    LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
                    SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
                    LogWriterUtils.getLogWriter().info("PR server local indexType:no  size of resultset: " + rs.size() + " for query: " + queryString[i]);
                    // The results should not be PdxString
                    checkForPdxString(rs.asList(), queryString[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString[i], e);
                }
                try {
                    SelectResults rs = (SelectResults) localQueryService.newQuery(queryString2[i]).execute();
                    LogWriterUtils.getLogWriter().info("PR server local indexType: no size of resultset: " + rs.size() + " for query: " + queryString2[i]);
                    // The results should not be PdxString
                    checkForPdxString(rs.asList(), queryString2[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString2[i], e);
                }
            }
        }
    });
    // test for readSerialized flag
    server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {

        @Override
        public void run2() throws CacheException {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            cache.setReadSerialized(true);
            QueryService localQueryService = getCache().getQueryService();
            // Query server1 locally to check if PdxString is not being returned
            for (int i = 0; i < queryString.length; i++) {
                try {
                    LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
                    SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
                    LogWriterUtils.getLogWriter().info("isPR: " + isPr + " server local readSerializedTrue: indexType: no index size of resultset: " + rs.size() + " for query: " + queryString[i]);
                    // The results should not be PdxString
                    checkForPdxString(rs.asList(), queryString[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString[i], e);
                }
            }
        }
    });
    // test for readSerialized flag on client
    client.invoke(new CacheSerializableRunnable("Create client") {

        @Override
        public void run2() throws CacheException {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            cache.setReadSerialized(true);
            QueryService remoteQueryService = (PoolManager.find(poolName)).getQueryService();
            // Query server1 remotely to check if PdxString is not being returned
            for (int i = 0; i < queryString.length; i++) {
                try {
                    LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
                    SelectResults rs = (SelectResults) remoteQueryService.newQuery(queryString[i]).execute();
                    LogWriterUtils.getLogWriter().info("RR server remote readSerializedTrue: indexType:no index size of resultset: " + rs.size() + " for query: " + queryString[i]);
                    // The results should not be PdxString
                    checkForPdxString(rs.asList(), queryString[i]);
                } catch (Exception e) {
                    Assert.fail("Failed executing " + queryString[i], e);
                }
            }
        }
    });
    closeClient(server2);
    closeClient(client);
    closeClient(server1);
    closeClient(server0);
}
Also used : DefaultQuery(org.apache.geode.cache.query.internal.DefaultQuery) Query(org.apache.geode.cache.query.Query) CacheException(org.apache.geode.cache.CacheException) Portfolio(org.apache.geode.cache.query.data.Portfolio) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) Host(org.apache.geode.test.dunit.Host) RangeIndex(org.apache.geode.cache.query.internal.index.RangeIndex) Index(org.apache.geode.cache.query.Index) CompactRangeIndex(org.apache.geode.cache.query.internal.index.CompactRangeIndex) PartitionedIndex(org.apache.geode.cache.query.internal.index.PartitionedIndex) PortfolioPdx(org.apache.geode.cache.query.data.PortfolioPdx) PdxString(org.apache.geode.pdx.internal.PdxString) IgnoredException(org.apache.geode.test.dunit.IgnoredException) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) PartitionedIndex(org.apache.geode.cache.query.internal.index.PartitionedIndex) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) SelectResults(org.apache.geode.cache.query.SelectResults) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) QueryService(org.apache.geode.cache.query.QueryService) VM(org.apache.geode.test.dunit.VM) Region(org.apache.geode.cache.Region) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) RangeIndex(org.apache.geode.cache.query.internal.index.RangeIndex) CompactRangeIndex(org.apache.geode.cache.query.internal.index.CompactRangeIndex) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) Test(org.junit.Test)

Example 22 with GemFireCacheImpl

use of org.apache.geode.internal.cache.GemFireCacheImpl in project geode by apache.

From the class MemoryThresholdsOffHeapDUnitTest, method prRemotePutRejection.

/**
 * Drives an off-heap partitioned region over its critical memory threshold on one
 * server and verifies that puts routed to buckets hosted by the critical member
 * throw {@link LowMemoryException}, while puts targeting only healthy members
 * still succeed. The sick member is then recovered — by lowering usage below the
 * eviction threshold, locally destroying the region, or closing the cache,
 * depending on the flags — and puts are retried until they succeed again.
 *
 * @param cacheClose   if {@code true}, recover by closing the cache on each critical member
 * @param localDestroy if {@code true}, recover by locally destroying the region on each
 *                     critical member (takes precedence over {@code cacheClose})
 * @param useTx        if {@code true}, wrap the puts expected to hit the sick member in a
 *                     transaction (committed on success, rolled back on LowMemoryException)
 */
private void prRemotePutRejection(boolean cacheClose, boolean localDestroy, final boolean useTx) throws Exception {
    final Host host = Host.getHost(0);
    final VM accessor = host.getVM(0);
    final VM[] servers = new VM[3];
    servers[0] = host.getVM(1);
    servers[1] = host.getVM(2);
    servers[2] = host.getVM(3);
    final String regionName = "offHeapPRRemotePutRejection";
    final int redundancy = 1;
    // Three data-store servers. NOTE(review): the 0f/90f arguments appear to be the
    // eviction and critical thresholds respectively — confirm against startCacheServer.
    startCacheServer(servers[0], 0f, 90f, regionName, true, /* createPR */
    false, /* notifyBySubscription */
    redundancy);
    startCacheServer(servers[1], 0f, 90f, regionName, true, /* createPR */
    false, /* notifyBySubscription */
    redundancy);
    startCacheServer(servers[2], 0f, 90f, regionName, true, /* createPR */
    false, /* notifyBySubscription */
    redundancy);
    // The accessor hosts no data (localMaxMemory = 0); all of its puts are remote.
    accessor.invoke(new SerializableCallable() {

        public Object call() throws Exception {
            getSystem(getOffHeapProperties());
            getCache();
            AttributesFactory factory = new AttributesFactory();
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy);
            paf.setLocalMaxMemory(0);
            paf.setTotalNumBuckets(11);
            factory.setPartitionAttributes(paf.create());
            factory.setOffHeap(true);
            createRegion(regionName, factory.create());
            return null;
        }
    });
    // Baseline traffic while all members are healthy: both flags false means no
    // failures are expected.
    doPuts(accessor, regionName, false, false);
    final Range r1 = Range.DEFAULT;
    doPutAlls(accessor, regionName, false, false, r1);
    // Suppress the expected low-memory log noise while a member is critical.
    servers[0].invoke(addExpectedException);
    servers[1].invoke(addExpectedException);
    servers[2].invoke(addExpectedException);
    // Force servers[0] over the critical threshold.
    setUsageAboveCriticalThreshold(servers[0], regionName);
    // Owners of the bucket for key "oh5" — these members host data on the now-sick VM
    // (plus its redundant copy holders) and should reject puts.
    final Set<InternalDistributedMember> criticalMembers = (Set) servers[0].invoke(new SerializableCallable() {

        public Object call() throws Exception {
            final PartitionedRegion pr = (PartitionedRegion) getRootRegion().getSubregion(regionName);
            final int hashKey = PartitionedRegionHelper.getHashKey(pr, null, "oh5", null, null);
            return pr.getRegionAdvisor().getBucketOwners(hashKey);
        }
    });
    accessor.invoke(new SerializableCallable() {

        public Object call() throws Exception {
            final PartitionedRegion pr = (PartitionedRegion) getRootRegion().getSubregion(regionName);
            WaitCriterion wc = new WaitCriterion() {

                public String description() {
                    return "remote bucket not marked sick";
                }

                // Done once at least one key mapped to a critical member AND a put to such a
                // key raised LowMemoryException; puts to healthy members must keep working.
                public boolean done() {
                    boolean keyFoundOnSickMember = false;
                    boolean caughtException = false;
                    for (int i = 0; i < 20; i++) {
                        Integer key = Integer.valueOf(i);
                        int hKey = PartitionedRegionHelper.getHashKey(pr, null, key, null, null);
                        Set<InternalDistributedMember> owners = pr.getRegionAdvisor().getBucketOwners(hKey);
                        // removeAll returns true iff this bucket has an owner in the critical set.
                        final boolean hasCriticalOwners = owners.removeAll(criticalMembers);
                        if (hasCriticalOwners) {
                            keyFoundOnSickMember = true;
                            try {
                                if (useTx)
                                    getCache().getCacheTransactionManager().begin();
                                pr.getCache().getLogger().fine("SWAP:putting in tx:" + useTx);
                                pr.put(key, "value");
                                if (useTx)
                                    getCache().getCacheTransactionManager().commit();
                            } catch (LowMemoryException ex) {
                                caughtException = true;
                                if (useTx)
                                    getCache().getCacheTransactionManager().rollback();
                            }
                        } else {
                            // puts on healthy member should continue
                            pr.put(key, "value");
                        }
                    }
                    return keyFoundOnSickMember && caughtException;
                }
            };
            Wait.waitForCriterion(wc, 10000, 10, true);
            return null;
        }
    });
    {
        // A putAll over a fresh range is expected to fail (fourth flag true) while a
        // member is still critical.
        Range r2 = new Range(r1, r1.width() + 1);
        doPutAlls(accessor, regionName, false, true, r2);
    }
    // Find all VMs that have a critical region
    SerializableCallable getMyId = new SerializableCallable() {

        public Object call() throws Exception {
            return ((GemFireCacheImpl) getCache()).getMyId();
        }
    };
    // Map the critical member ids back to their hosting test VMs.
    final Set<VM> criticalServers = new HashSet<VM>();
    for (final VM server : servers) {
        DistributedMember member = (DistributedMember) server.invoke(getMyId);
        if (criticalMembers.contains(member)) {
            criticalServers.add(server);
        }
    }
    if (localDestroy) {
        // local destroy the region on sick members
        for (final VM vm : criticalServers) {
            vm.invoke(new SerializableCallable("local destroy sick member") {

                public Object call() throws Exception {
                    Region r = getRootRegion().getSubregion(regionName);
                    LogWriterUtils.getLogWriter().info("PRLocalDestroy");
                    r.localDestroyRegion();
                    return null;
                }
            });
        }
    } else if (cacheClose) {
        // close cache on sick members
        for (final VM vm : criticalServers) {
            vm.invoke(new SerializableCallable("close cache sick member") {

                public Object call() throws Exception {
                    getCache().close();
                    return null;
                }
            });
        }
    } else {
        // Default recovery: drop usage back below the eviction threshold and clear the
        // expected-exception markers added earlier.
        setUsageBelowEviction(servers[0], regionName);
        servers[0].invoke(removeExpectedException);
        servers[1].invoke(removeExpectedException);
        servers[2].invoke(removeExpectedException);
    }
    // do put all in a loop to allow distribution of message
    accessor.invoke(new SerializableCallable("Put in a loop") {

        public Object call() throws Exception {
            final Region r = getRootRegion().getSubregion(regionName);
            WaitCriterion wc = new WaitCriterion() {

                public String description() {
                    return "pr should have gone un-critical";
                }

                // Done once a full pass of 20 puts completes without LowMemoryException,
                // i.e. the critical state has propagated away.
                public boolean done() {
                    boolean done = true;
                    for (int i = 0; i < 20; i++) {
                        try {
                            r.put(i, "value");
                        } catch (LowMemoryException e) {
                            // expected
                            done = false;
                        }
                    }
                    return done;
                }
            };
            Wait.waitForCriterion(wc, 10000, 10, true);
            return null;
        }
    });
    // Final putAll must now succeed end to end.
    doPutAlls(accessor, regionName, false, false, r1);
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) Host(org.apache.geode.test.dunit.Host) Range(org.apache.geode.cache.management.MemoryThresholdsDUnitTest.Range) IgnoredException(org.apache.geode.test.dunit.IgnoredException) CacheLoaderException(org.apache.geode.cache.CacheLoaderException) LowMemoryException(org.apache.geode.cache.LowMemoryException) ServerOperationException(org.apache.geode.cache.client.ServerOperationException) CacheException(org.apache.geode.cache.CacheException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) WaitCriterion(org.apache.geode.test.dunit.WaitCriterion) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) DistributedMember(org.apache.geode.distributed.DistributedMember) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) LocalRegion(org.apache.geode.internal.cache.LocalRegion) DistributedRegion(org.apache.geode.internal.cache.DistributedRegion) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) ProxyBucketRegion(org.apache.geode.internal.cache.ProxyBucketRegion) LowMemoryException(org.apache.geode.cache.LowMemoryException) HashSet(java.util.HashSet)

Example 23 with GemFireCacheImpl

use of org.apache.geode.internal.cache.GemFireCacheImpl in project geode by apache.

From the class ResourceManagerDUnitTest, method testGetInternalPRDetails.

/**
   * Creates partitioned regions in multiple vms and fully exercises the internal-only
   * getInternalPRDetails API on ResourceManager.
   */
@Test
public void testGetInternalPRDetails() {
    // two regions
    final String[] regionPath = new String[] { getUniqueName() + "-PR-0", getUniqueName() + "-PR-1" };
    // numBuckets config for the two regions
    final int[] numBuckets = new int[] { 100, 90 };
    // redundantCopies config for the two regions
    final int[] redundantCopies = new int[] { 1, 0 };
    // localMaxMemory config to use for three members
    final int[] localMaxMemory = new int[] { 50, 100, 0 };
    // bucketKeys to use for making three bckets in first PR
    final Integer[] bucketKeys = new Integer[] { Integer.valueOf(0), Integer.valueOf(42), Integer.valueOf(76) };
    assertEquals(0, bucketKeys[0].hashCode());
    assertEquals(42, bucketKeys[1].hashCode());
    assertEquals(76, bucketKeys[2].hashCode());
    createRegion(Host.getHost(0).getVM(0), regionPath[0], localMaxMemory[0], numBuckets[0], redundantCopies[0]);
    createRegion(Host.getHost(0).getVM(1), regionPath[0], localMaxMemory[1], numBuckets[0], redundantCopies[0]);
    createRegion(Host.getHost(0).getVM(2), regionPath[0], localMaxMemory[2], numBuckets[0], redundantCopies[0]);
    createRegion(Host.getHost(0).getVM(0), regionPath[1], localMaxMemory[0], numBuckets[1], redundantCopies[1]);
    // 2 MB in size
    final byte[] value = new byte[1024 * 1024 * 2];
    createBuckets(0, regionPath[0], bucketKeys, value);
    // identify the members and their config values
    final InternalDistributedMember[] members = new InternalDistributedMember[3];
    final long[] memberSizes = new long[members.length];
    final int[] memberBucketCounts = new int[members.length];
    final int[] memberPrimaryCounts = new int[members.length];
    for (int i = 0; i < members.length; i++) {
        final int vm = i;
        members[vm] = (InternalDistributedMember) Host.getHost(0).getVM(vm).invoke(new SerializableCallable() {

            public Object call() {
                return getSystem().getDistributedMember();
            }
        });
        memberSizes[vm] = ((Long) Host.getHost(0).getVM(vm).invoke(new SerializableCallable() {

            public Object call() {
                PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(regionPath[0]);
                PartitionedRegionDataStore ds = pr.getDataStore();
                if (ds == null) {
                    return Long.valueOf(0);
                } else {
                    return Long.valueOf(getSize(ds));
                }
            }
        })).longValue();
        memberBucketCounts[vm] = ((Integer) Host.getHost(0).getVM(vm).invoke(new SerializableCallable() {

            public Object call() {
                PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(regionPath[0]);
                PartitionedRegionDataStore ds = pr.getDataStore();
                if (ds == null) {
                    return new Integer(0);
                } else {
                    return new Integer(ds.getBucketsManaged());
                }
            }
        })).intValue();
        memberPrimaryCounts[vm] = ((Integer) Host.getHost(0).getVM(vm).invoke(new SerializableCallable() {

            public Object call() {
                PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(regionPath[0]);
                PartitionedRegionDataStore ds = pr.getDataStore();
                if (ds == null) {
                    return new Integer(0);
                } else {
                    return new Integer(ds.getNumberOfPrimaryBucketsManaged());
                }
            }
        })).intValue();
    }
    // test everything here
    for (int i = 0; i < localMaxMemory.length; i++) {
        final int vm = i;
        Host.getHost(0).getVM(vm).invoke(new SerializableRunnable() {

            public void run() {
                Set<InternalPRInfo> detailsSet = new HashSet<InternalPRInfo>();
                GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
                for (PartitionedRegion pr : cache.getPartitionedRegions()) {
                    InternalPRInfo info = pr.getRedundancyProvider().buildPartitionedRegionInfo(true, cache.getInternalResourceManager().getLoadProbe());
                    detailsSet.add(info);
                }
                if (vm == 0) {
                    assertEquals(2, detailsSet.size());
                } else {
                    assertEquals(1, detailsSet.size());
                }
                // iterate over each InternalPRDetails
                for (Iterator<InternalPRInfo> prIter = detailsSet.iterator(); prIter.hasNext(); ) {
                    InternalPRInfo details = prIter.next();
                    // NOTE: getRegionPath() contains the Region.SEPARATOR + regionPath
                    assertTrue("Unknown regionPath=" + details.getRegionPath(), details.getRegionPath().contains(regionPath[0]) || details.getRegionPath().contains(regionPath[1]));
                    if (details.getRegionPath().contains(regionPath[0])) {
                        assertEquals(numBuckets[0], details.getConfiguredBucketCount());
                        assertEquals(0, details.getLowRedundancyBucketCount());
                        assertEquals(redundantCopies[0], details.getConfiguredRedundantCopies());
                        assertEquals(redundantCopies[0], details.getActualRedundantCopies());
                        assertNull(details.getColocatedWith());
                        Set<InternalPartitionDetails> memberDetails = details.getInternalPartitionDetails();
                        assertNotNull(memberDetails);
                        assertEquals(localMaxMemory.length - 1, memberDetails.size());
                        // iterate over each InternalPartitionDetails (datastores only)
                        for (Iterator<InternalPartitionDetails> mbrIter = memberDetails.iterator(); mbrIter.hasNext(); ) {
                            InternalPartitionDetails mbrDetails = mbrIter.next();
                            assertNotNull(mbrDetails);
                            DistributedMember mbr = mbrDetails.getDistributedMember();
                            assertNotNull(mbr);
                            int membersIdx = -1;
                            for (int idx = 0; idx < members.length; idx++) {
                                if (mbr.equals(members[idx])) {
                                    membersIdx = idx;
                                }
                            }
                            assertEquals(localMaxMemory[membersIdx] * 1024 * 1024, mbrDetails.getConfiguredMaxMemory());
                            assertEquals(memberSizes[membersIdx], mbrDetails.getSize());
                            assertEquals(memberBucketCounts[membersIdx], mbrDetails.getBucketCount());
                            assertEquals(memberPrimaryCounts[membersIdx], mbrDetails.getPrimaryCount());
                            PRLoad load = mbrDetails.getPRLoad();
                            assertNotNull(load);
                            assertEquals((float) localMaxMemory[membersIdx], load.getWeight(), 0);
                            int totalBucketBytes = 0;
                            int primaryCount = 0;
                            for (int bid = 0; bid < numBuckets[0]; bid++) {
                                long bucketBytes = mbrDetails.getBucketSize(bid);
                                assertTrue(bucketBytes >= 0);
                                totalBucketBytes += bucketBytes;
                                // validate against the PRLoad
                                assertEquals((float) bucketBytes, load.getReadLoad(bid), 0);
                                if (load.getWriteLoad(bid) > 0) {
                                    // found a primary
                                    primaryCount++;
                                }
                            }
                            // assertIndexDetailsEquals(memberSizes[membersIdx] * (1024* 1024),
                            // totalBucketBytes);
                            assertEquals(memberPrimaryCounts[membersIdx], primaryCount);
                            if (mbr.equals(getSystem().getDistributedMember())) {
                                // PartitionMemberDetails represents the local member
                                PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(details.getRegionPath());
                                assertEquals(pr.getLocalMaxMemory() * (1024L * 1024L), mbrDetails.getConfiguredMaxMemory());
                                PartitionedRegionDataStore ds = pr.getDataStore();
                                assertNotNull(ds);
                                assertEquals(getSize(ds), mbrDetails.getSize());
                                assertEquals(ds.getBucketsManaged(), mbrDetails.getBucketCount());
                                assertEquals(ds.getNumberOfPrimaryBucketsManaged(), mbrDetails.getPrimaryCount());
                            }
                        }
                    } else {
                        // found the other PR which has only one datastore and we know
                        // this system memberId is the only entry in mbrDetails
                        assertEquals(numBuckets[1], details.getConfiguredBucketCount());
                        assertEquals(0, details.getLowRedundancyBucketCount());
                        assertEquals(redundantCopies[1], details.getConfiguredRedundantCopies());
                        assertEquals(redundantCopies[1], details.getActualRedundantCopies());
                        assertNull(details.getColocatedWith());
                        Set<PartitionMemberInfo> memberDetails = details.getPartitionMemberInfo();
                        assertNotNull(memberDetails);
                        assertEquals(1, memberDetails.size());
                        PartitionMemberInfo mbrDetails = memberDetails.iterator().next();
                        assertEquals(getSystem().getDistributedMember(), mbrDetails.getDistributedMember());
                        PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(details.getRegionPath());
                        assertEquals(pr.getLocalMaxMemory() * (1024L * 1024L), mbrDetails.getConfiguredMaxMemory());
                        PartitionedRegionDataStore ds = pr.getDataStore();
                        assertNotNull(ds);
                        assertEquals(getSize(ds), mbrDetails.getSize());
                        assertEquals(ds.getBucketsManaged(), mbrDetails.getBucketCount());
                        assertEquals(ds.getNumberOfPrimaryBucketsManaged(), mbrDetails.getPrimaryCount());
                    }
                }
            }
        });
    }
    destroyRegions(0, regionPath);
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) PRLoad(org.apache.geode.internal.cache.partitioned.PRLoad) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) PartitionedRegionDataStore(org.apache.geode.internal.cache.PartitionedRegionDataStore) InternalPRInfo(org.apache.geode.internal.cache.partitioned.InternalPRInfo) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) PartitionMemberInfo(org.apache.geode.cache.partition.PartitionMemberInfo) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) InternalPartitionDetails(org.apache.geode.internal.cache.partitioned.InternalPartitionDetails) Iterator(java.util.Iterator) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) DistributedMember(org.apache.geode.distributed.DistributedMember) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test)

Example 24 with GemFireCacheImpl

use of org.apache.geode.internal.cache.GemFireCacheImpl in project geode by apache.

the class MemoryThresholdsDUnitTest method testEventDelivery.

/**
 * Make sure appropriate events are delivered when moving between memory states
 * (NORMAL, EVICTION, CRITICAL), and that the remote server observes the same
 * listener counts as the server whose heap usage is being driven.
 *
 * @throws Exception if server startup or a remote invocation fails
 */
// GEODE-427: random ports, time sensitive, waitForCriterions
@Category(FlakyTest.class)
@Test
public void testEventDelivery() throws Exception {
    final Host host = Host.getHost(0);
    final VM server1 = host.getVM(0);
    final VM server2 = host.getVM(1);
    final String regionName = "testEventDelivery";
    // server1 has no thresholds configured; server2 evicts at 80% and goes critical at
    // 90% of its test-fixed 1000-byte heap, i.e. eviction at 800 and critical at 900.
    // The returned ports are not needed by this test.
    startCacheServer(server1, 0f, 0f, regionName, false, /* createPR */
    false, /* notifyBySubscription */
    0);
    startCacheServer(server2, 80f, 90f, regionName, false, /* createPR */
    false, /* notifyBySubscription */
    0);
    registerLoggingTestMemoryThresholdListener(server1);
    registerTestMemoryThresholdListener(server2);
    // NORMAL -> CRITICAL (crossing the eviction threshold on the way up, so one
    // EVICTION event and one CRITICAL event are expected)
    server2.invoke(new SerializableCallable("NORMAL->CRITICAL") {

        public Object call() throws Exception {
            GemFireCacheImpl gfCache = (GemFireCacheImpl) getCache();
            // bracket the expected exception string so the log scanner ignores it
            getCache().getLoggerI18n().fine(addExpectedExString);
            gfCache.getInternalResourceManager().getHeapMonitor().updateStateAndSendEvent(950);
            getCache().getLoggerI18n().fine(removeExpectedExString);
            return null;
        }
    });
    verifyListenerValue(server2, MemoryState.CRITICAL, 1, true);
    verifyListenerValue(server2, MemoryState.EVICTION, 1, true);
    verifyListenerValue(server2, MemoryState.NORMAL, 0, true);
    // make sure we get two events on remote server
    verifyListenerValue(server1, MemoryState.CRITICAL, 1, true);
    verifyListenerValue(server1, MemoryState.EVICTION, 1, true);
    verifyListenerValue(server1, MemoryState.NORMAL, 0, true);
    // CRITICAL -> EVICTION (850 is below critical 900 but above eviction 800)
    server2.invoke(new SerializableCallable("CRITICAL->EVICTION") {

        public Object call() throws Exception {
            GemFireCacheImpl gfCache = (GemFireCacheImpl) getCache();
            getCache().getLoggerI18n().fine(addExpectedBelow);
            gfCache.getInternalResourceManager().getHeapMonitor().updateStateAndSendEvent(850);
            getCache().getLoggerI18n().fine(removeExpectedBelow);
            return null;
        }
    });
    verifyListenerValue(server2, MemoryState.CRITICAL, 1, true);
    verifyListenerValue(server2, MemoryState.EVICTION, 2, true);
    verifyListenerValue(server2, MemoryState.NORMAL, 0, true);
    verifyListenerValue(server1, MemoryState.CRITICAL, 1, true);
    verifyListenerValue(server1, MemoryState.EVICTION, 2, true);
    verifyListenerValue(server1, MemoryState.NORMAL, 0, true);
    // EVICTION -> EVICTION (no state change, so no new events expected)
    server2.invoke(new SerializableCallable("EVICTION->EVICTION") {

        public Object call() throws Exception {
            GemFireCacheImpl gfCache = (GemFireCacheImpl) getCache();
            gfCache.getInternalResourceManager().getHeapMonitor().updateStateAndSendEvent(840);
            return null;
        }
    });
    verifyListenerValue(server2, MemoryState.CRITICAL, 1, true);
    verifyListenerValue(server2, MemoryState.EVICTION, 2, true);
    verifyListenerValue(server2, MemoryState.NORMAL, 0, true);
    verifyListenerValue(server1, MemoryState.CRITICAL, 1, true);
    verifyListenerValue(server1, MemoryState.EVICTION, 2, true);
    verifyListenerValue(server1, MemoryState.NORMAL, 0, true);
    // EVICTION -> NORMAL (750 is below the eviction threshold of 800)
    server2.invoke(new SerializableCallable("EVICTION->NORMAL") {

        public Object call() throws Exception {
            GemFireCacheImpl gfCache = (GemFireCacheImpl) getCache();
            gfCache.getInternalResourceManager().getHeapMonitor().updateStateAndSendEvent(750);
            return null;
        }
    });
    verifyListenerValue(server2, MemoryState.CRITICAL, 1, true);
    verifyListenerValue(server2, MemoryState.EVICTION, 2, true);
    verifyListenerValue(server2, MemoryState.NORMAL, 1, true);
    verifyListenerValue(server1, MemoryState.CRITICAL, 1, true);
    verifyListenerValue(server1, MemoryState.EVICTION, 2, true);
    verifyListenerValue(server1, MemoryState.NORMAL, 1, true);
    LogWriterUtils.getLogWriter().info("before NORMAL->CRITICAL->NORMAL");
    // NORMAL -> CRITICAL -> NORMAL (both transitions in one invocation; the upswing
    // also crosses EVICTION, the downswing delivers another NORMAL)
    server2.invoke(new SerializableCallable("NORMAL->CRITICAL->NORMAL") {

        public Object call() throws Exception {
            GemFireCacheImpl gfCache = (GemFireCacheImpl) getCache();
            gfCache.getInternalResourceManager().getHeapMonitor().updateStateAndSendEvent(950);
            gfCache.getInternalResourceManager().getHeapMonitor().updateStateAndSendEvent(750);
            return null;
        }
    });
    LogWriterUtils.getLogWriter().info("after NORMAL->CRITICAL->NORMAL");
    verifyListenerValue(server2, MemoryState.CRITICAL, 2, true);
    verifyListenerValue(server2, MemoryState.EVICTION, 3, true);
    verifyListenerValue(server2, MemoryState.NORMAL, 2, true);
    verifyListenerValue(server1, MemoryState.CRITICAL, 2, true);
    verifyListenerValue(server1, MemoryState.EVICTION, 3, true);
    verifyListenerValue(server1, MemoryState.NORMAL, 2, true);
    // NORMAL -> EVICTION (stops below critical this time)
    server2.invoke(new SerializableCallable("NORMAL->EVICTION") {

        public Object call() throws Exception {
            GemFireCacheImpl gfCache = (GemFireCacheImpl) getCache();
            gfCache.getInternalResourceManager().getHeapMonitor().updateStateAndSendEvent(850);
            return null;
        }
    });
    verifyListenerValue(server2, MemoryState.CRITICAL, 2, true);
    verifyListenerValue(server2, MemoryState.EVICTION, 4, true);
    verifyListenerValue(server2, MemoryState.NORMAL, 2, true);
    verifyListenerValue(server1, MemoryState.CRITICAL, 2, true);
    verifyListenerValue(server1, MemoryState.EVICTION, 4, true);
    verifyListenerValue(server1, MemoryState.NORMAL, 2, true);
}
Also used : VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) Host(org.apache.geode.test.dunit.Host) IgnoredException(org.apache.geode.test.dunit.IgnoredException) FunctionException(org.apache.geode.cache.execute.FunctionException) CacheLoaderException(org.apache.geode.cache.CacheLoaderException) LowMemoryException(org.apache.geode.cache.LowMemoryException) ServerOperationException(org.apache.geode.cache.client.ServerOperationException) CacheException(org.apache.geode.cache.CacheException) Category(org.junit.experimental.categories.Category) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test)

Example 25 with GemFireCacheImpl

use of org.apache.geode.internal.cache.GemFireCacheImpl in project geode by apache.

the class MemoryThresholdsDUnitTest method startCacheServer.

/**
 * Starts a CacheServer in the given VM with the supplied memory thresholds and
 * region configuration, using a deterministic fake heap size so the percentage
 * thresholds map to predictable byte values.
 *
 * @return a {@link ServerPorts} containing the CacheServer ports.
 */
private ServerPorts startCacheServer(VM server, final float evictionThreshold, final float criticalThreshold, final String regionName, final boolean createPR, final boolean notifyBySubscription, final int prRedundancy) throws Exception {
    return (ServerPorts) server.invoke(new SerializableCallable() {

        public Object call() throws Exception {
            getSystem(getServerProperties());
            // Pin the monitored heap to 1000 bytes (500 in use) so the configured
            // percentage thresholds correspond to known absolute byte values.
            GemFireCacheImpl cacheImpl = (GemFireCacheImpl) getCache();
            InternalResourceManager resourceManager = cacheImpl.getInternalResourceManager();
            HeapMemoryMonitor heapMonitor = resourceManager.getHeapMonitor();
            heapMonitor.setTestMaxMemoryBytes(1000);
            HeapMemoryMonitor.setTestBytesUsedForThresholdSet(500);
            resourceManager.setEvictionHeapPercentage(evictionThreshold);
            resourceManager.setCriticalHeapPercentage(criticalThreshold);
            // Build either a partitioned or a replicated distributed region.
            AttributesFactory regionFactory = new AttributesFactory();
            if (createPR) {
                PartitionAttributesFactory partitionFactory = new PartitionAttributesFactory();
                partitionFactory.setRedundantCopies(prRedundancy);
                partitionFactory.setTotalNumBuckets(11);
                regionFactory.setPartitionAttributes(partitionFactory.create());
            } else {
                regionFactory.setScope(Scope.DISTRIBUTED_ACK);
                regionFactory.setDataPolicy(DataPolicy.REPLICATE);
            }
            Region region = createRegion(regionName, regionFactory.create());
            // sanity-check that the region came up with the requested topology
            assertTrue(createPR ? region instanceof PartitionedRegion : region instanceof DistributedRegion);
            // Listen on a randomly chosen free TCP port.
            CacheServer bridge = getCache().addCacheServer();
            int serverPort = AvailablePortHelper.getRandomAvailableTCPPorts(1)[0];
            bridge.setPort(serverPort);
            bridge.setNotifyBySubscription(notifyBySubscription);
            bridge.start();
            return new ServerPorts(serverPort);
        }
    });
}
Also used : PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) DistributedRegion(org.apache.geode.internal.cache.DistributedRegion) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) CacheServer(org.apache.geode.cache.server.CacheServer) HeapMemoryMonitor(org.apache.geode.internal.cache.control.HeapMemoryMonitor) DistributedRegion(org.apache.geode.internal.cache.DistributedRegion) InternalResourceManager(org.apache.geode.internal.cache.control.InternalResourceManager)

Aggregations

GemFireCacheImpl (org.apache.geode.internal.cache.GemFireCacheImpl)213 Test (org.junit.Test)127 Region (org.apache.geode.cache.Region)86 DistributedTest (org.apache.geode.test.junit.categories.DistributedTest)55 LocalRegion (org.apache.geode.internal.cache.LocalRegion)54 PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion)51 VM (org.apache.geode.test.dunit.VM)49 DistributedRegion (org.apache.geode.internal.cache.DistributedRegion)47 Host (org.apache.geode.test.dunit.Host)42 ClientCacheCreation (org.apache.geode.internal.cache.xmlcache.ClientCacheCreation)40 RegionAttributes (org.apache.geode.cache.RegionAttributes)39 CacheCreation (org.apache.geode.internal.cache.xmlcache.CacheCreation)35 SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable)33 CacheException (org.apache.geode.cache.CacheException)32 RegionCreation (org.apache.geode.internal.cache.xmlcache.RegionCreation)32 SerializableCallable (org.apache.geode.test.dunit.SerializableCallable)31 Properties (java.util.Properties)24 AttributesFactory (org.apache.geode.cache.AttributesFactory)24 Cache (org.apache.geode.cache.Cache)23 IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest)23