
Example 71 with PartitionedRegion

Use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache.

The class MemoryThresholdsDUnitTest, method startCacheServer.

/**
   * Starts up a CacheServer.
   * 
   * @return a {@link ServerPorts} containing the CacheServer ports.
   */
private ServerPorts startCacheServer(VM server, final float evictionThreshold,
        final float criticalThreshold, final String regionName, final boolean createPR,
        final boolean notifyBySubscription, final int prRedundancy) throws Exception {
    return (ServerPorts) server.invoke(new SerializableCallable() {

        public Object call() throws Exception {
            getSystem(getServerProperties());
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            InternalResourceManager irm = cache.getInternalResourceManager();
            HeapMemoryMonitor hmm = irm.getHeapMonitor();
            // Test hooks: fix the monitor's view of the heap (1000 bytes max, 500 in use)
            // so the eviction/critical percentages below are evaluated deterministically.
            hmm.setTestMaxMemoryBytes(1000);
            HeapMemoryMonitor.setTestBytesUsedForThresholdSet(500);
            irm.setEvictionHeapPercentage(evictionThreshold);
            irm.setCriticalHeapPercentage(criticalThreshold);
            AttributesFactory factory = new AttributesFactory();
            if (createPR) {
                PartitionAttributesFactory paf = new PartitionAttributesFactory();
                paf.setRedundantCopies(prRedundancy);
                paf.setTotalNumBuckets(11);
                factory.setPartitionAttributes(paf.create());
            } else {
                factory.setScope(Scope.DISTRIBUTED_ACK);
                factory.setDataPolicy(DataPolicy.REPLICATE);
            }
            Region region = createRegion(regionName, factory.create());
            if (createPR) {
                assertTrue(region instanceof PartitionedRegion);
            } else {
                assertTrue(region instanceof DistributedRegion);
            }
            CacheServer cacheServer = getCache().addCacheServer();
            int port = AvailablePortHelper.getRandomAvailableTCPPorts(1)[0];
            cacheServer.setPort(port);
            cacheServer.setNotifyBySubscription(notifyBySubscription);
            cacheServer.start();
            return new ServerPorts(port);
        }
    });
}
Also used : PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) DistributedRegion(org.apache.geode.internal.cache.DistributedRegion) Region(org.apache.geode.cache.Region) CacheServer(org.apache.geode.cache.server.CacheServer) HeapMemoryMonitor(org.apache.geode.internal.cache.control.HeapMemoryMonitor) InternalResourceManager(org.apache.geode.internal.cache.control.InternalResourceManager)
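For comparison, the same setup can be expressed with public Geode APIs only, without the dunit harness or the internal HeapMemoryMonitor test hooks. This is a minimal sketch, not taken from the test; the region name and fixed port are illustrative assumptions.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.control.ResourceManager;
import org.apache.geode.cache.server.CacheServer;

public class ThresholdServerSketch {
    public static void main(String[] args) throws Exception {
        Cache cache = new CacheFactory().create();

        // Public equivalent of the eviction/critical settings made on
        // InternalResourceManager in the test above.
        ResourceManager rm = cache.getResourceManager();
        rm.setEvictionHeapPercentage(80f);
        rm.setCriticalHeapPercentage(90f);

        // Partitioned region with one redundant copy and 11 buckets.
        PartitionAttributesFactory<Integer, String> paf = new PartitionAttributesFactory<>();
        paf.setRedundantCopies(1);
        paf.setTotalNumBuckets(11);
        Region<Integer, String> region = cache
            .<Integer, String>createRegionFactory(RegionShortcut.PARTITION)
            .setPartitionAttributes(paf.create())
            .create("exampleRegion");
        System.out.println("Created " + region.getFullPath());

        // Cache server on an illustrative fixed port (the test picks a random free port).
        CacheServer server = cache.addCacheServer();
        server.setPort(40404);
        server.start();
    }
}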

Example 72 with PartitionedRegion

Use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache.

The class MemoryThresholdsDUnitTest, method prRemotePutRejection.

private void prRemotePutRejection(boolean cacheClose, boolean localDestroy, final boolean useTx) throws Exception {
    final Host host = Host.getHost(0);
    final VM accessor = host.getVM(0);
    final VM server1 = host.getVM(1);
    final VM server2 = host.getVM(2);
    final VM server3 = host.getVM(3);
    final String regionName = "testPrRejection";
    final int redundancy = 1;
    final ServerPorts ports1 = startCacheServer(server1, 80f, 90f, regionName,
        true /* createPR */, false /* notifyBySubscription */, redundancy);
    ServerPorts ports2 = startCacheServer(server2, 80f, 90f, regionName,
        true /* createPR */, false /* notifyBySubscription */, redundancy);
    ServerPorts ports3 = startCacheServer(server3, 80f, 90f, regionName,
        true /* createPR */, false /* notifyBySubscription */, redundancy);
    accessor.invoke(new SerializableCallable() {

        public Object call() throws Exception {
            getSystem(getServerProperties());
            getCache();
            AttributesFactory factory = new AttributesFactory();
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(redundancy);
            // localMaxMemory of 0 makes this member a pure accessor that hosts no buckets.
            paf.setLocalMaxMemory(0);
            paf.setTotalNumBuckets(11);
            factory.setPartitionAttributes(paf.create());
            createRegion(regionName, factory.create());
            return null;
        }
    });
    doPuts(accessor, regionName, false, false);
    final Range r1 = Range.DEFAULT;
    doPutAlls(accessor, regionName, false, false, r1);
    SerializableCallable getMyId = new SerializableCallable() {

        public Object call() throws Exception {
            return ((GemFireCacheImpl) getCache()).getMyId();
        }
    };
    final DistributedMember server1Id = (DistributedMember) server1.invoke(getMyId);
    setUsageAboveCriticalThreshold(server1);
    accessor.invoke(new SerializableCallable() {

        public Object call() throws Exception {
            final PartitionedRegion pr = (PartitionedRegion) getRootRegion().getSubregion(regionName);
            final String regionPath = getRootRegion().getSubregion(regionName).getFullPath();
            // server1 is sick, look for a key on server1, and attempt put again
            WaitCriterion wc = new WaitCriterion() {

                public String description() {
                    return "remote bucket not marked sick";
                }

                public boolean done() {
                    boolean keyFoundOnSickMember = false;
                    boolean caughtException = false;
                    for (int i = 0; i < 20; i++) {
                        Integer key = Integer.valueOf(i);
                        int hKey = PartitionedRegionHelper.getHashKey(pr, null, key, null, null);
                        Set<InternalDistributedMember> owners = pr.getRegionAdvisor().getBucketOwners(hKey);
                        if (owners.contains(server1Id)) {
                            keyFoundOnSickMember = true;
                            try {
                                if (useTx)
                                    getCache().getCacheTransactionManager().begin();
                                pr.getCache().getLogger().fine("SWAP:putting in tx:" + useTx);
                                pr.put(key, "value");
                                if (useTx)
                                    getCache().getCacheTransactionManager().commit();
                            } catch (LowMemoryException ex) {
                                caughtException = true;
                                if (useTx)
                                    getCache().getCacheTransactionManager().rollback();
                            }
                        } else {
                            // puts on healthy member should continue
                            pr.put(key, "value");
                        }
                    }
                    return keyFoundOnSickMember && caughtException;
                }
            };
            Wait.waitForCriterion(wc, 30000, 10, true);
            return null;
        }
    });
    {
        Range r2 = new Range(r1, r1.width() + 1);
        doPutAlls(accessor, regionName, false, true, r2);
    }
    if (localDestroy) {
        // local destroy the region on sick member
        server1.invoke(new SerializableCallable("local destroy sick member") {

            public Object call() throws Exception {
                Region r = getRootRegion().getSubregion(regionName);
                LogWriterUtils.getLogWriter().info("PRLocalDestroy");
                r.localDestroyRegion();
                return null;
            }
        });
    } else if (cacheClose) {
        // close cache on sick member
        server1.invoke(new SerializableCallable("close cache sick member") {

            public Object call() throws Exception {
                getCache().close();
                return null;
            }
        });
    } else {
        setUsageBelowEviction(server1);
    }
    // do put all in a loop to allow distribution of message
    accessor.invoke(new SerializableCallable("Put in a loop") {

        public Object call() throws Exception {
            final Region r = getRootRegion().getSubregion(regionName);
            WaitCriterion wc = new WaitCriterion() {

                public String description() {
                    return "pr should have gone un-critical";
                }

                public boolean done() {
                    boolean done = true;
                    for (int i = 0; i < 20; i++) {
                        try {
                            r.put(i, "value");
                        } catch (LowMemoryException e) {
                            // expected
                            done = false;
                        }
                    }
                    return done;
                }
            };
            Wait.waitForCriterion(wc, 30000, 10, true);
            return null;
        }
    });
    doPutAlls(accessor, regionName, false, false, r1);
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) Host(org.apache.geode.test.dunit.Host) IgnoredException(org.apache.geode.test.dunit.IgnoredException) FunctionException(org.apache.geode.cache.execute.FunctionException) CacheLoaderException(org.apache.geode.cache.CacheLoaderException) LowMemoryException(org.apache.geode.cache.LowMemoryException) ServerOperationException(org.apache.geode.cache.client.ServerOperationException) CacheException(org.apache.geode.cache.CacheException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) WaitCriterion(org.apache.geode.test.dunit.WaitCriterion) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) DistributedMember(org.apache.geode.distributed.DistributedMember) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) DistributedRegion(org.apache.geode.internal.cache.DistributedRegion) Region(org.apache.geode.cache.Region)
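Stripped of the wait loops, the caller-side behavior being verified reduces to the pattern below: a put routed to a bucket hosted on a critical member throws LowMemoryException, and a transactional caller rolls back. A minimal sketch assuming a helper of this shape (tryPut is not part of the test or of Geode).

import java.util.Set;

import org.apache.geode.cache.CacheTransactionManager;
import org.apache.geode.cache.LowMemoryException;
import org.apache.geode.cache.Region;
import org.apache.geode.distributed.DistributedMember;

public class LowMemoryPutSketch {

    // Attempts a transactional put; returns false if a hosting member is critical.
    static boolean tryPut(Region<Integer, String> region, CacheTransactionManager txMgr,
            Integer key, String value) {
        try {
            txMgr.begin();
            region.put(key, value);
            txMgr.commit();
            return true;
        } catch (LowMemoryException e) {
            // Members that have crossed their critical heap threshold.
            Set<DistributedMember> critical = e.getCriticalMembers();
            System.err.println("put rejected, critical members: " + critical);
            txMgr.rollback();
            return false;
        }
    }
}

Non-transactional callers simply catch the exception and retry once the member drops back below its critical threshold, which is what the "Put in a loop" callable above waits for.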

Example 73 with PartitionedRegion

Use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache.

The class CacheXml66DUnitTest, method testPersistentPartition.

/**
   * Make sure you can create a persistent partitioned region from xml.
   */
@Test
public void testPersistentPartition() throws Exception {
    CacheCreation cache = new CacheCreation();
    RegionAttributesCreation attrs = new RegionAttributesCreation(cache);
    attrs.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
    cache.createRegion("parRoot", attrs);
    Region r = cache.getRegion("parRoot");
    assertEquals(DataPolicy.PERSISTENT_PARTITION, r.getAttributes().getDataPolicy());
    testXml(cache);
    Cache c = getCache();
    assertNotNull(c);
    Region region = c.getRegion("parRoot");
    assertNotNull(region);
    assertEquals(DataPolicy.PERSISTENT_PARTITION, region.getAttributes().getDataPolicy());
    // since CacheTestCase remoteTearDown does not destroy PartitionedRegion
    region.localDestroyRegion();
}
Also used : RegionAttributesCreation(org.apache.geode.internal.cache.xmlcache.RegionAttributesCreation) LocalRegion(org.apache.geode.internal.cache.LocalRegion) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) DistributedRegion(org.apache.geode.internal.cache.DistributedRegion) CacheCreation(org.apache.geode.internal.cache.xmlcache.CacheCreation) ClientCacheCreation(org.apache.geode.internal.cache.xmlcache.ClientCacheCreation) Cache(org.apache.geode.cache.Cache) ClientCache(org.apache.geode.cache.client.ClientCache) Test(org.junit.Test)
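Programmatically, the same persistent partitioned region can be created with the public RegionFactory API; a minimal sketch, assuming the PARTITION_PERSISTENT shortcut, which yields DataPolicy.PERSISTENT_PARTITION (the policy the XML round trip above asserts on).

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DataPolicy;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class PersistentPartitionSketch {
    public static void main(String[] args) {
        Cache cache = new CacheFactory().create();

        // PARTITION_PERSISTENT is the shortcut for DataPolicy.PERSISTENT_PARTITION.
        Region<String, String> region = cache
            .<String, String>createRegionFactory(RegionShortcut.PARTITION_PERSISTENT)
            .create("parRoot");

        assert DataPolicy.PERSISTENT_PARTITION.equals(region.getAttributes().getDataPolicy());

        cache.close();
    }
}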

Example 74 with PartitionedRegion

Use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache.

The class MultiVMRegionTestCase, method testRemoteCacheWriter.

/**
   * Tests that a remote {@link CacheWriter} is invoked and that <code>CacheWriter</code> arguments
   * and {@link CacheWriterException}s are propagated appropriately.
   */
@Test
public void testRemoteCacheWriter() throws Exception {
    assertTrue(getRegionAttributes().getScope().isDistributed());
    final String name = this.getUniqueName();
    final Object key = "KEY";
    final Object oldValue = "OLD_VALUE";
    final Object newValue = "NEW_VALUE";
    final Object arg = "ARG";
    final Object exception = "EXCEPTION";
    final Object key2 = "KEY2";
    final Object value2 = "VALUE2";
    SerializableRunnable create = new CacheSerializableRunnable("Create Region") {

        @Override
        public void run2() throws CacheException {
            Region region = createRegion(name);
            // Put key2 in the region before any callbacks are
            // registered, so it can be destroyed later
            region.put(key2, value2);
            assertEquals(1, region.size());
            if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                LocalRegion reRegion;
                reRegion = (LocalRegion) region;
                RegionEntry re = reRegion.getRegionEntry(key2);
                StoredObject so = (StoredObject) re._getValue();
                assertEquals(1, so.getRefCount());
                assertEquals(1, ma.getStats().getObjects());
            }
        }
    };
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    vm0.invoke(create);
    vm1.invoke(create);
    //////// Create
    vm1.invoke(new CacheSerializableRunnable("Set Writer") {

        @Override
        public void run2() throws CacheException {
            final Region region = getRootRegion().getSubregion(name);
            writer = new TestCacheWriter() {

                @Override
                public void beforeCreate2(EntryEvent event) throws CacheWriterException {
                    if (exception.equals(event.getCallbackArgument())) {
                        String s = "Test Exception";
                        throw new CacheWriterException(s);
                    }
                    assertEquals(region, event.getRegion());
                    assertTrue(event.getOperation().isCreate());
                    assertTrue(event.getOperation().isDistributed());
                    assertFalse(event.getOperation().isExpiration());
                    assertTrue(event.isOriginRemote());
                    assertEquals(key, event.getKey());
                    assertEquals(null, event.getOldValue());
                    assertEquals(oldValue, event.getNewValue());
                    assertFalse(event.getOperation().isLoad());
                    assertFalse(event.getOperation().isLocalLoad());
                    assertFalse(event.getOperation().isNetLoad());
                    assertFalse(event.getOperation().isNetSearch());
                }
            };
            region.getAttributesMutator().setCacheWriter(writer);
            flushIfNecessary(region);
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Create with Exception") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            try {
                region.put(key, oldValue, exception);
                fail("Should have thrown a CacheWriterException");
            } catch (CacheWriterException ex) {
                assertNull(region.getEntry(key));
                assertEquals(1, region.size());
                if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                    GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                    MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                    assertEquals(1, ma.getStats().getObjects());
                }
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Create with Argument") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            region.put(key, oldValue, arg);
            assertEquals(2, region.size());
            if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                assertEquals(2, ma.getStats().getObjects());
                LocalRegion reRegion;
                reRegion = (LocalRegion) region;
                StoredObject so = (StoredObject) reRegion.getRegionEntry(key)._getValue();
                assertEquals(1, so.getRefCount());
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    //////// Update
    vm1.invoke(new CacheSerializableRunnable("Set Writer") {

        @Override
        public void run2() throws CacheException {
            final Region region = getRootRegion().getSubregion(name);
            writer = new TestCacheWriter() {

                @Override
                public void beforeUpdate2(EntryEvent event) throws CacheWriterException {
                    Object argument = event.getCallbackArgument();
                    if (exception.equals(argument)) {
                        String s = "Test Exception";
                        throw new CacheWriterException(s);
                    }
                    assertEquals(arg, argument);
                    assertEquals(region, event.getRegion());
                    assertTrue(event.getOperation().isUpdate());
                    assertTrue(event.getOperation().isDistributed());
                    assertFalse(event.getOperation().isExpiration());
                    assertTrue(event.isOriginRemote());
                    assertEquals(key, event.getKey());
                    assertEquals(oldValue, event.getOldValue());
                    assertEquals(newValue, event.getNewValue());
                    assertFalse(event.getOperation().isLoad());
                    assertFalse(event.getOperation().isLocalLoad());
                    assertFalse(event.getOperation().isNetLoad());
                    assertFalse(event.getOperation().isNetSearch());
                }
            };
            region.getAttributesMutator().setCacheWriter(writer);
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Update with Exception") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            try {
                region.put(key, newValue, exception);
                fail("Should have thrown a CacheWriterException");
            } catch (CacheWriterException ex) {
                Region.Entry entry = region.getEntry(key);
                assertEquals(oldValue, entry.getValue());
                assertEquals(2, region.size());
                if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                    GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                    MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                    assertEquals(2, ma.getStats().getObjects());
                    LocalRegion reRegion;
                    reRegion = (LocalRegion) region;
                    StoredObject so = (StoredObject) reRegion.getRegionEntry(key)._getValue();
                    assertEquals(1, so.getRefCount());
                }
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Update with Argument") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            region.put(key, newValue, arg);
            assertEquals(2, region.size());
            if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                assertEquals(2, ma.getStats().getObjects());
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    //////// Destroy
    vm1.invoke(new CacheSerializableRunnable("Set Writer") {

        @Override
        public void run2() throws CacheException {
            final Region region = getRootRegion().getSubregion(name);
            writer = new TestCacheWriter() {

                @Override
                public void beforeDestroy2(EntryEvent event) throws CacheWriterException {
                    Object argument = event.getCallbackArgument();
                    if (exception.equals(argument)) {
                        String s = "Test Exception";
                        throw new CacheWriterException(s);
                    }
                    assertEquals(arg, argument);
                    assertEquals(region, event.getRegion());
                    assertTrue(event.getOperation().isDestroy());
                    assertTrue(event.getOperation().isDistributed());
                    assertFalse(event.getOperation().isExpiration());
                    assertTrue(event.isOriginRemote());
                    assertEquals(key, event.getKey());
                    assertEquals(newValue, event.getOldValue());
                    assertNull(event.getNewValue());
                    assertFalse(event.getOperation().isLoad());
                    assertFalse(event.getOperation().isLocalLoad());
                    assertFalse(event.getOperation().isNetLoad());
                    assertFalse(event.getOperation().isNetSearch());
                }
            };
            region.getAttributesMutator().setCacheWriter(writer);
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Destroy with Exception") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            try {
                region.destroy(key, exception);
                fail("Should have thrown a CacheWriterException");
            } catch (CacheWriterException ex) {
                assertNotNull(region.getEntry(key));
                assertEquals(2, region.size());
                if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                    GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                    MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                    assertEquals(2, ma.getStats().getObjects());
                }
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Destroy with Argument") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            region.destroy(key, arg);
            assertEquals(1, region.size());
            if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                assertEquals(1, ma.getStats().getObjects());
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    //////// Region Destroy
    vm1.invoke(new CacheSerializableRunnable("Set Writer") {

        @Override
        public void run2() throws CacheException {
            final Region region = getRootRegion().getSubregion(name);
            writer = new TestCacheWriter() {

                @Override
                public void beforeRegionDestroy2(RegionEvent event) throws CacheWriterException {
                    Object argument = event.getCallbackArgument();
                    if (exception.equals(argument)) {
                        String s = "Test Exception";
                        throw new CacheWriterException(s);
                    }
                    assertEquals(arg, argument);
                    assertEquals(region, event.getRegion());
                    assertTrue(event.getOperation().isRegionDestroy());
                    assertTrue(event.getOperation().isDistributed());
                    assertFalse(event.getOperation().isExpiration());
                    assertTrue(event.isOriginRemote());
                }
            };
            region.getAttributesMutator().setCacheWriter(writer);
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Destroy with Exception") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            try {
                region.destroyRegion(exception);
                fail("Should have thrown a CacheWriterException");
            } catch (CacheWriterException ex) {
                if (region.isDestroyed()) {
                    fail("should not have an exception if region is destroyed", ex);
                }
                assertEquals(1, region.size());
                if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                    GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                    MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                    assertEquals(1, ma.getStats().getObjects());
                }
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Destroy with Argument") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            assertEquals(1, region.size());
            if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                assertEquals(1, ma.getStats().getObjects());
            }
            region.destroyRegion(arg);
            if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                final MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                WaitCriterion waitForStatChange = new WaitCriterion() {

                    @Override
                    public boolean done() {
                        return ma.getStats().getObjects() == 0;
                    }

                    @Override
                    public String description() {
                        return "never saw off-heap object count go to zero. Last value was " + ma.getStats().getObjects();
                    }
                };
                Wait.waitForCriterion(waitForStatChange, 3000, 10, true);
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
}
Also used : CacheException(org.apache.geode.cache.CacheException) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) MemoryAllocatorImpl(org.apache.geode.internal.offheap.MemoryAllocatorImpl) Host(org.apache.geode.test.dunit.Host) LocalRegion(org.apache.geode.internal.cache.LocalRegion) RegionEvent(org.apache.geode.cache.RegionEvent) WaitCriterion(org.apache.geode.test.dunit.WaitCriterion) StoredObject(org.apache.geode.internal.offheap.StoredObject) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) VM(org.apache.geode.test.dunit.VM) EntryEvent(org.apache.geode.cache.EntryEvent) Region(org.apache.geode.cache.Region) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) RegionEntry(org.apache.geode.internal.cache.RegionEntry) CacheWriterException(org.apache.geode.cache.CacheWriterException) Test(org.junit.Test) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest)
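Outside the test scaffolding, the veto mechanism this test exercises is a CacheWriter whose before* callbacks throw CacheWriterException to reject the operation back on the caller. A minimal sketch using the public CacheWriterAdapter; the "EXCEPTION" callback-argument convention mirrors the test but is otherwise arbitrary.

import org.apache.geode.cache.CacheWriterException;
import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.util.CacheWriterAdapter;

public class VetoingWriterSketch {

    // Rejects any create whose callback argument equals "EXCEPTION".
    static class VetoingWriter extends CacheWriterAdapter<Object, Object> {
        @Override
        public void beforeCreate(EntryEvent<Object, Object> event) throws CacheWriterException {
            if ("EXCEPTION".equals(event.getCallbackArgument())) {
                throw new CacheWriterException("rejected by writer");
            }
        }
    }

    static void install(Region<Object, Object> region) {
        // Only one member's writer is invoked per operation, and a thrown
        // CacheWriterException is propagated back to the member doing the put.
        region.getAttributesMutator().setCacheWriter(new VetoingWriter());
    }
}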

Example 75 with PartitionedRegion

Use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache.

The class PRBucketSynchronizationDUnitTest, method verifySynchronized.

private void verifySynchronized(VM vm, final InternalDistributedMember crashedMember) {
    vm.invoke(new SerializableCallable("check that synchronization happened") {

        public Object call() throws Exception {
            PartitionedRegion pr = (PartitionedRegion) TestRegion;
            final BucketRegion bucket = pr.getDataStore().getLocalBucketById(0);
            Wait.waitForCriterion(new WaitCriterion() {

                String waitingFor = "primary is still in membership view: " + crashedMember;

                boolean dumped = false;

                public boolean done() {
                    if (TestRegion.getCache().getDistributionManager().isCurrentMember(crashedMember)) {
                        LogWriterUtils.getLogWriter().info(waitingFor);
                        return false;
                    }
                    if (!TestRegion.containsKey("Object3")) {
                        waitingFor = "entry for Object3 not found";
                        LogWriterUtils.getLogWriter().info(waitingFor);
                        return false;
                    }
                    RegionEntry re = bucket.getRegionMap().getEntry("Object5");
                    if (re == null) {
                        if (!dumped) {
                            dumped = true;
                            bucket.dumpBackingMap();
                        }
                        waitingFor = "entry for Object5 not found";
                        LogWriterUtils.getLogWriter().info(waitingFor);
                        return false;
                    }
                    if (!re.isTombstone()) {
                        if (!dumped) {
                            dumped = true;
                            bucket.dumpBackingMap();
                        }
                        waitingFor = "Object5 is not a tombstone but should be: " + re;
                        LogWriterUtils.getLogWriter().info(waitingFor);
                        return false;
                    }
                    return true;
                }

                public String description() {
                    return waitingFor;
                }
            }, 30000, 5000, true);
            return null;
        }
    });
}
Also used : WaitCriterion(org.apache.geode.test.dunit.WaitCriterion) BucketRegion(org.apache.geode.internal.cache.BucketRegion) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) RegionEntry(org.apache.geode.internal.cache.RegionEntry) IgnoredException(org.apache.geode.test.dunit.IgnoredException)
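WaitCriterion and Wait.waitForCriterion come from Geode's dunit test utilities; outside that framework the same check is just a bounded poll. A minimal, framework-free sketch of that polling pattern (the helper name and signature are illustrative).

import java.util.function.BooleanSupplier;

public class AwaitSketch {

    // Polls the condition until it holds or the timeout elapses, roughly
    // mirroring Wait.waitForCriterion(criterion, timeoutMs, intervalMs, true).
    static void await(BooleanSupplier condition, long timeoutMs, long intervalMs)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!condition.getAsBoolean()) {
            if (System.currentTimeMillis() > deadline) {
                throw new AssertionError("condition not met within " + timeoutMs + " ms");
            }
            Thread.sleep(intervalMs);
        }
    }
}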

Aggregations

PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion): 367
Test (org.junit.Test): 135
Region (org.apache.geode.cache.Region): 115
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 112
HashSet (java.util.HashSet): 105
SerializableCallable (org.apache.geode.test.dunit.SerializableCallable): 104
AttributesFactory (org.apache.geode.cache.AttributesFactory): 86
VM (org.apache.geode.test.dunit.VM): 86
Host (org.apache.geode.test.dunit.Host): 85
ArrayList (java.util.ArrayList): 77
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory): 75
Set (java.util.Set): 69
IOException (java.io.IOException): 66
Function (org.apache.geode.cache.execute.Function): 66
FunctionException (org.apache.geode.cache.execute.FunctionException): 63
List (java.util.List): 62
RegionAttributes (org.apache.geode.cache.RegionAttributes): 61
Iterator (java.util.Iterator): 60
IgnoredException (org.apache.geode.test.dunit.IgnoredException): 59
Cache (org.apache.geode.cache.Cache): 56