Search in sources :

Example 11 with RegionFactory

use of org.apache.geode.cache.RegionFactory in project geode by apache.

In class MultiVMRegionTestCase, method versionTestGIISendsTombstones.

/*
   * This test creates a server cache in vm0 and a peer cache in vm1. It then tests to see if GII
   * transferred tombstones to vm1 like it's supposed to. A client cache is created in vm2 and the
   * same sort of check is performed for register-interest.
   */
public void versionTestGIISendsTombstones() throws Exception {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    // vm2 was intended to host a client cache; its invocations are currently
    // commented out below, so it is unused in this test.
    VM vm2 = host.getVM(2);
    final int serverPort = AvailablePortHelper.getRandomAvailableTCPPort();
    // create replicated regions in VM 0 and 1, then perform concurrent ops
    // on the same key while creating the region in VM2. Afterward make
    // sure that all three regions are consistent
    final String name = this.getUniqueName() + "-CC";
    // Creates the CC test region; in VM 0 it additionally starts a cache server
    // on serverPort so a client cache could connect to it.
    SerializableRunnable createRegion = new SerializableRunnable("Create Region") {

        @Override
        public void run() {
            try {
                RegionFactory f = getCache().createRegionFactory(getRegionAttributes());
                CCRegion = (LocalRegion) f.create(name);
                if (VM.getCurrentVMNum() == 0) {
                    CacheServer bridge = CCRegion.getCache().addCacheServer();
                    bridge.setPort(serverPort);
                    try {
                        bridge.start();
                    } catch (IOException ex) {
                        fail("While creating bridge", ex);
                    }
                }
            } catch (CacheException ex) {
                fail("While creating region", ex);
            }
        }
    };
    // Verifies that the destroy of "object2" arrived as a tombstone entry
    // (i.e. the entry exists but is marked as a tombstone).
    SerializableRunnable asserter = new SerializableRunnable("ensure tombstone has been received") {

        @Override
        public void run() {
            RegionEntry entry = CCRegion.getRegionEntry("object2");
            assertTrue(entry != null);
            assertTrue(entry.isTombstone());
        }
    };
    vm0.invoke(createRegion);
    // Populate three entries and destroy one, leaving a tombstone in vm0.
    vm0.invoke(new SerializableRunnable("create some tombstones") {

        @Override
        public void run() {
            CCRegion.put("object1", "value1");
            CCRegion.put("object2", "value2");
            CCRegion.put("object3", "value3");
            CCRegion.destroy("object2");
        }
    });
    try {
        vm0.invoke(asserter);
        // vm1 creates its region AFTER the destroy, so the tombstone must
        // arrive via GII (getInitialImage) rather than a distributed op.
        vm1.invoke(createRegion);
        vm1.invoke(asserter);
    // NOTE(review): the register-interest/client-cache half of this test is
    // disabled; vm2 is never exercised.
    // vm2.invoke(createClientCache);
    // vm2.invoke(asserter);
    } finally {
        disconnectAllFromDS();
    }
}
Also used : RegionFactory(org.apache.geode.cache.RegionFactory) CacheException(org.apache.geode.cache.CacheException) VM(org.apache.geode.test.dunit.VM) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) CacheServer(org.apache.geode.cache.server.CacheServer) RegionEntry(org.apache.geode.internal.cache.RegionEntry) Host(org.apache.geode.test.dunit.Host) IOException(java.io.IOException)

Example 12 with RegionFactory

use of org.apache.geode.cache.RegionFactory in project geode by apache.

In class MultiVMRegionTestCase, method versionTestConcurrentEvents.

/**
   * This tests the concurrency versioning system to ensure that event conflation happens correctly
   * and that the statistic is being updated properly
   */
public void versionTestConcurrentEvents() throws Exception {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    // create replicated regions in VM 0 and 1, then perform concurrent ops
    // on the same key while creating the region in VM2. Afterward make
    // sure that all three regions are consistent
    final String name = this.getUniqueName() + "-CC";
    SerializableRunnable createRegion = new SerializableRunnable("Create Region") {

        @Override
        public void run() {
            try {
                RegionFactory f = getCache().createRegionFactory(getRegionAttributes());
                CCRegion = (LocalRegion) f.create(name);
            } catch (CacheException ex) {
                fail("While creating region", ex);
            }
        }
    };
    vm0.invoke(createRegion);
    vm1.invoke(createRegion);
    // Runs the shared ops loop and then checks the conflated-events statistic.
    // The conflation assertion is skipped for GLOBAL scope (see the isGlobal()
    // guard below).
    SerializableRunnable performOps = new SerializableRunnable("perform concurrent ops") {

        @Override
        public void run() {
            try {
                doOpsLoop(5000, false);
                long events = CCRegion.getCachePerfStats().getConflatedEventsCount();
                if (!CCRegion.getScope().isGlobal()) {
                    assertTrue("expected some event conflation", events > 0);
                }
            } catch (CacheException e) {
                fail("while performing concurrent operations", e);
            }
        // } catch (InterruptedException e) {
        // fail("someone interrupted my sleep");
        // }
        }
    };
    AsyncInvocation a0 = vm0.invokeAsync(performOps);
    AsyncInvocation a1 = vm1.invokeAsync(performOps);
    // Give the async ops a head start so vm2's region creation (and its GII)
    // happens while updates are in flight.
    try {
        Thread.sleep(500);
    } catch (InterruptedException e) {
        fail("sleep was interrupted");
    }
    vm2.invoke(createRegion);
    boolean a0failed = waitForAsyncProcessing(a0, "expected some event conflation");
    boolean a1failed = waitForAsyncProcessing(a1, "expected some event conflation");
    // Conflation is timing-dependent, so it is enough for ONE of the two
    // members to have observed it.
    if (a0failed && a1failed) {
        fail("neither member saw event conflation - check stats for " + name);
    }
    // check consistency of the regions
    Map r0Contents = (Map) vm0.invoke(() -> this.getCCRegionContents());
    Map r1Contents = (Map) vm1.invoke(() -> this.getCCRegionContents());
    Map r2Contents = (Map) vm2.invoke(() -> this.getCCRegionContents());
    for (int i = 0; i < 10; i++) {
        String key = "cckey" + i;
        assertEquals("region contents are not consistent for " + key, r0Contents.get(key), r1Contents.get(key));
        assertEquals("region contents are not consistent for " + key, r1Contents.get(key), r2Contents.get(key));
        // Sub-keys may or may not have been created by the ops loop; whichever
        // it is, the outcome must be identical in every member.
        for (int subi = 1; subi < 3; subi++) {
            String subkey = key + "-" + subi;
            if (r0Contents.containsKey(subkey)) {
                assertEquals("region contents are not consistent for " + subkey, r0Contents.get(subkey), r1Contents.get(subkey));
                assertEquals("region contents are not consistent for " + subkey, r1Contents.get(subkey), r2Contents.get(subkey));
            } else {
                assertTrue(!r1Contents.containsKey(subkey));
            }
        }
    }
    if (!getRegionAttributes().getScope().isDistributedNoAck()) {
        // no-ack doesn't support deltas
        vm0.invoke(() -> this.clearCCRegion());
        // Second phase: hammer the same keys with DeltaValue puts and verify
        // that some delta applications failed (getDeltaFailedUpdates > 0).
        performOps = new SerializableRunnable("perform concurrent delta ops") {

            @Override
            public void run() {
                try {
                    long stopTime = System.currentTimeMillis() + 5000;
                    Random ran = new Random(System.currentTimeMillis());
                    while (System.currentTimeMillis() < stopTime) {
                        for (int i = 0; i < 10; i++) {
                            CCRegion.put("cckey" + i, new DeltaValue("ccvalue" + ran.nextInt()));
                        }
                    }
                    long events = CCRegion.getCachePerfStats().getDeltaFailedUpdates();
                    assertTrue("expected some failed deltas", events > 0);
                } catch (CacheException e) {
                    fail("while performing concurrent operations", e);
                }
            }
        };
        a0 = vm0.invokeAsync(performOps);
        a1 = vm1.invokeAsync(performOps);
        a0failed = waitForAsyncProcessing(a0, "expected some failed deltas");
        a1failed = waitForAsyncProcessing(a1, "expected some failed deltas");
        if (a0failed && a1failed) {
            fail("neither member saw failed deltas - check stats for " + name);
        }
        // check consistency of the regions
        r0Contents = (Map) vm0.invoke(() -> this.getCCRegionContents());
        r1Contents = (Map) vm1.invoke(() -> this.getCCRegionContents());
        r2Contents = (Map) vm2.invoke(() -> this.getCCRegionContents());
        for (int i = 0; i < 10; i++) {
            String key = "cckey" + i;
            assertEquals("region contents are not consistent", r0Contents.get(key), r1Contents.get(key));
            assertEquals("region contents are not consistent", r1Contents.get(key), r2Contents.get(key));
            for (int subi = 1; subi < 3; subi++) {
                String subkey = key + "-" + subi;
                if (r0Contents.containsKey(subkey)) {
                    assertEquals("region contents are not consistent", r0Contents.get(subkey), r1Contents.get(subkey));
                    assertEquals("region contents are not consistent", r1Contents.get(subkey), r2Contents.get(subkey));
                } else {
                    assertTrue(!r1Contents.containsKey(subkey));
                }
            }
        }
        // The region version vectors should now all be consistent with the version stamps in the
        // entries.
        InternalDistributedMember vm0Id = (InternalDistributedMember) vm0.invoke(() -> this.getMemberId());
        InternalDistributedMember vm1Id = (InternalDistributedMember) vm1.invoke(() -> this.getMemberId());
        InternalDistributedMember vm2Id = (InternalDistributedMember) vm2.invoke(() -> this.getMemberId());
        long start = System.currentTimeMillis();
        RegionVersionVector vm0vv = getVersionVector(vm0);
        long end = System.currentTimeMillis();
        org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("version vector transmission took " + (end - start) + " ms");
        org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("vm0 vector = " + vm0vv);
        RegionVersionVector vm1vv = getVersionVector(vm1);
        org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("vm1 vector = " + vm1vv);
        RegionVersionVector vm2vv = getVersionVector(vm2);
        org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("vm2 vector = " + vm2vv);
        Map<String, VersionTag> vm0Versions = (Map<String, VersionTag>) vm0.invoke(() -> this.getCCRegionVersions());
        Map<String, VersionTag> vm1Versions = (Map<String, VersionTag>) vm1.invoke(() -> this.getCCRegionVersions());
        Map<String, VersionTag> vm2Versions = (Map<String, VersionTag>) vm2.invoke(() -> this.getCCRegionVersions());
        // Every entry's version tag (with null member IDs replaced by the
        // owning member's ID) must be contained in all three version vectors.
        for (Map.Entry<String, VersionTag> entry : vm0Versions.entrySet()) {
            VersionTag tag = entry.getValue();
            tag.replaceNullIDs(vm0Id);
            assertTrue(vm0Id + " should contain " + tag, vm0vv.contains(tag.getMemberID(), tag.getRegionVersion()));
            assertTrue(vm1Id + " should contain " + tag, vm1vv.contains(tag.getMemberID(), tag.getRegionVersion()));
            assertTrue(vm2Id + " should contain " + tag, vm2vv.contains(tag.getMemberID(), tag.getRegionVersion()));
        }
        for (Map.Entry<String, VersionTag> entry : vm1Versions.entrySet()) {
            VersionTag tag = entry.getValue();
            tag.replaceNullIDs(vm1Id);
            assertTrue(vm0Id + " should contain " + tag, vm0vv.contains(tag.getMemberID(), tag.getRegionVersion()));
            assertTrue(vm1Id + " should contain " + tag, vm1vv.contains(tag.getMemberID(), tag.getRegionVersion()));
            assertTrue(vm2Id + " should contain " + tag, vm2vv.contains(tag.getMemberID(), tag.getRegionVersion()));
        }
        for (Map.Entry<String, VersionTag> entry : vm2Versions.entrySet()) {
            VersionTag tag = entry.getValue();
            tag.replaceNullIDs(vm2Id);
            assertTrue(vm0Id + " should contain " + tag, vm0vv.contains(tag.getMemberID(), tag.getRegionVersion()));
            assertTrue(vm1Id + " should contain " + tag, vm1vv.contains(tag.getMemberID(), tag.getRegionVersion()));
            assertTrue(vm2Id + " should contain " + tag, vm2vv.contains(tag.getMemberID(), tag.getRegionVersion()));
        }
    }
}
Also used : CacheException(org.apache.geode.cache.CacheException) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) Host(org.apache.geode.test.dunit.Host) RegionVersionVector(org.apache.geode.internal.cache.versions.RegionVersionVector) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) RegionFactory(org.apache.geode.cache.RegionFactory) Random(java.util.Random) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) VM(org.apache.geode.test.dunit.VM) VersionTag(org.apache.geode.internal.cache.versions.VersionTag) Map(java.util.Map) HashMap(java.util.HashMap)

Example 13 with RegionFactory

use of org.apache.geode.cache.RegionFactory in project geode by apache.

In class MultiVMRegionTestCase, method versionTestConcurrentEventsOnEmptyRegion.

/**
   * This tests the concurrency versioning system to ensure that event conflation happens correctly
   * and that the statistic is being updated properly
   */
public void versionTestConcurrentEventsOnEmptyRegion() {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    // this VM, but treat as a remote for uniformity
    VM vm3 = host.getVM(3);
    // create an empty region in vm0 and replicated regions in VM 1 and 3,
    // then perform concurrent ops
    // on the same key while creating the region in VM2. Afterward make
    // sure that all three regions are consistent
    final String name = this.getUniqueName() + "-CC";
    SerializableRunnable createRegion = new SerializableRunnable("Create Region") {

        @Override
        public void run() {
            try {
                final RegionFactory f;
                // vm0 hosts an empty (proxy) region; all other VMs replicate.
                if (VM.getCurrentVMNum() == 0) {
                    f = getCache().createRegionFactory(getRegionAttributes(RegionShortcut.REPLICATE_PROXY.toString()));
                } else {
                    f = getCache().createRegionFactory(getRegionAttributes());
                }
                CCRegion = (LocalRegion) f.create(name);
            } catch (CacheException ex) {
                fail("While creating region", ex);
            }
        }
    };
    vm0.invoke(createRegion);
    vm1.invoke(createRegion);
    vm3.invoke(createRegion);
    // Runs the shared ops loop; only replicated members check the
    // conflated-events statistic (the empty proxy in vm0 holds no data).
    SerializableRunnable performOps = new SerializableRunnable("perform concurrent ops") {

        @Override
        public void run() {
            try {
                doOpsLoop(5000, false);
                sendSerialMessageToAll();
                if (CCRegion.getAttributes().getDataPolicy().withReplication()) {
                    long events = CCRegion.getCachePerfStats().getConflatedEventsCount();
                    assertTrue("expected some event conflation", events > 0);
                }
            } catch (CacheException e) {
                fail("while performing concurrent operations", e);
            }
        }
    };
    AsyncInvocation a0 = vm0.invokeAsync(performOps);
    AsyncInvocation a1 = vm1.invokeAsync(performOps);
    // Give the async ops a head start so vm2's region creation (and its GII)
    // happens while updates are in flight.
    try {
        Thread.sleep(500);
    } catch (InterruptedException e) {
        fail("sleep was interrupted");
    }
    vm2.invoke(createRegion);
    boolean a0failed = waitForAsyncProcessing(a0, "expected some event conflation");
    boolean a1failed = waitForAsyncProcessing(a1, "expected some event conflation");
    // Conflation is timing-dependent, so it is enough for one member to see it.
    if (a0failed && a1failed) {
        fail("neither member saw event conflation - check stats for " + name);
    }
    // check consistency of the regions (vm0 is empty, so compare vm1/vm2/vm3)
    Map r1Contents = (Map) vm1.invoke(() -> this.getCCRegionContents());
    Map r2Contents = (Map) vm2.invoke(() -> this.getCCRegionContents());
    Map r3Contents = (Map) vm3.invoke(() -> this.getCCRegionContents());
    for (int i = 0; i < 10; i++) {
        String key = "cckey" + i;
        assertEquals("region contents are not consistent for " + key, r1Contents.get(key), r2Contents.get(key));
        assertEquals("region contents are not consistent for " + key, r2Contents.get(key), r3Contents.get(key));
        // Sub-keys may or may not have been created by the ops loop; whichever
        // it is, the outcome must be identical in every member.
        for (int subi = 1; subi < 3; subi++) {
            String subkey = key + "-" + subi;
            if (r1Contents.containsKey(subkey)) {
                assertEquals("region contents are not consistent for " + subkey, r1Contents.get(subkey), r2Contents.get(subkey));
                assertEquals("region contents are not consistent for " + subkey, r2Contents.get(subkey), r3Contents.get(subkey));
            } else {
                assertTrue(!r2Contents.containsKey(subkey));
                assertTrue(!r3Contents.containsKey(subkey));
            }
        }
    }
}
Also used : RegionFactory(org.apache.geode.cache.RegionFactory) CacheException(org.apache.geode.cache.CacheException) VM(org.apache.geode.test.dunit.VM) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) Host(org.apache.geode.test.dunit.Host) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) Map(java.util.Map) HashMap(java.util.HashMap)

Example 14 with RegionFactory

use of org.apache.geode.cache.RegionFactory in project geode by apache.

In class MultiVMRegionTestCase, method versionTestTombstones.

/**
 * Tests tombstone lifecycle: destroys create tombstones in both members, the
 * tombstones expire within the (shortened) timeouts, and a subsequent create on
 * a tombstoned key resurrects the entry with an afterCreate callback. The
 * TombstoneService static settings are shortened for the test and restored in
 * the finally block.
 */
public void versionTestTombstones() {
    disconnectAllFromDS();
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    final int numEntries = 100;
    // create replicated regions in VM 0 and 1, then perform concurrent ops
    // on the same key while creating the region in VM2. Afterward make
    // sure that all three regions are consistent
    // Save the TombstoneService statics so they can be restored in the finally
    // block below.
    final long oldServerTimeout = TombstoneService.REPLICATE_TOMBSTONE_TIMEOUT;
    final long oldClientTimeout = TombstoneService.NON_REPLICATE_TOMBSTONE_TIMEOUT;
    final int oldExpiredTombstoneLimit = TombstoneService.EXPIRED_TOMBSTONE_LIMIT;
    final boolean oldIdleExpiration = TombstoneService.IDLE_EXPIRATION;
    final double oldLimit = TombstoneService.GC_MEMORY_THRESHOLD;
    final long oldMaxSleepTime = TombstoneService.MAX_SLEEP_TIME;
    try {
        // Shorten timeouts so tombstones expire within the test's wait windows.
        SerializableRunnable setTimeout = new SerializableRunnable() {

            @Override
            public void run() {
                TombstoneService.REPLICATE_TOMBSTONE_TIMEOUT = 1000;
                TombstoneService.NON_REPLICATE_TOMBSTONE_TIMEOUT = 900;
                TombstoneService.EXPIRED_TOMBSTONE_LIMIT = numEntries;
                TombstoneService.IDLE_EXPIRATION = true;
                // turn off the memory threshold so a heap profile won't cause
                // the test to fail
                TombstoneService.GC_MEMORY_THRESHOLD = 0;
                TombstoneService.MAX_SLEEP_TIME = 500;
            }
        };
        vm0.invoke(setTimeout);
        vm1.invoke(setTimeout);
        final String name = this.getUniqueName() + "-CC";
        SerializableRunnable createRegion = new SerializableRunnable("Create Region") {

            @Override
            public void run() {
                try {
                    RegionFactory f = getCache().createRegionFactory(getRegionAttributes());
                    CCRegion = (LocalRegion) f.create(name);
                    for (int i = 0; i < numEntries; i++) {
                        CCRegion.put("cckey" + i, "ccvalue");
                    }
                    if (CCRegion.getScope().isDistributedNoAck()) {
                        // flush the ops
                        sendSerialMessageToAll();
                    }
                } catch (CacheException ex) {
                    fail("While creating region", ex);
                }
            }
        };
        vm0.invoke(createRegion);
        vm1.invoke(createRegion);
        // Destroy all entries in vm0 and verify each destroy leaves a tombstone
        // rather than a live entry.
        vm0.invoke(new SerializableRunnable("destroy entries and check tombstone count") {

            @Override
            public void run() {
                try {
                    for (int i = 0; i < numEntries; i++) {
                        CCRegion.destroy("cckey" + i);
                        assertTrue("entry should not exist", !CCRegion.containsKey("cckey" + i));
                        assertTrue("entry should not contain a value", !CCRegion.containsValueForKey("cckey" + i));
                    }
                    checkCCRegionTombstoneCount("after destroys in this vm ", numEntries);
                    assertTrue("region should not contain a tombstone", !CCRegion.containsValue(Token.TOMBSTONE));
                    if (CCRegion.getScope().isDistributedNoAck()) {
                        // flush the ops
                        sendSerialMessageToAll();
                    }
                } catch (CacheException e) {
                    fail("while performing destroy operations", e);
                }
            }
        });
        // vm1 should have received all tombstones, which then expire.
        vm1.invoke(new SerializableRunnable("check tombstone count(2)") {

            @Override
            public void run() {
                checkCCRegionTombstoneCount("after destroys in other vm ", numEntries);
                WaitCriterion waitForExpiration = new WaitCriterion() {

                    @Override
                    public boolean done() {
                        return CCRegion.getTombstoneCount() == 0;
                    }

                    @Override
                    public String description() {
                        return "Waiting for all tombstones to expire.  There are now " + CCRegion.getTombstoneCount() + " tombstones left out of " + numEntries + " initial tombstones. " + CCRegion.getCache().getTombstoneService();
                    }
                };
                try {
                    Wait.waitForCriterion(waitForExpiration, TombstoneService.REPLICATE_TOMBSTONE_TIMEOUT + (TombstoneService.MAX_SLEEP_TIME * 9), 100, true);
                } catch (AssertionError e) {
                    // dump diagnostic state before rethrowing the wait failure
                    CCRegion.dumpBackingMap();
                    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("tombstone service state: " + CCRegion.getCache().getTombstoneService());
                    throw e;
                }
            }
        });
        // After vm0's tombstones expire, create+destroy each key again to make
        // fresh tombstones, then install an afterCreate-counting listener.
        vm0.invoke(new SerializableRunnable("create/destroy entries and check tombstone count") {

            @Override
            public void run() {
                final int origCount = CCRegion.getTombstoneCount();
                try {
                    WaitCriterion waitForExpiration = new WaitCriterion() {

                        @Override
                        public boolean done() {
                            return CCRegion.getTombstoneCount() == 0;
                        }

                        @Override
                        public String description() {
                            return "Waiting for all tombstones to expire.  There are now " + CCRegion.getTombstoneCount() + " tombstones left out of " + origCount + " initial tombstones. " + CCRegion.getCache().getTombstoneService();
                        }
                    };
                    Wait.waitForCriterion(waitForExpiration, TombstoneService.REPLICATE_TOMBSTONE_TIMEOUT + (TombstoneService.MAX_SLEEP_TIME * 9), 100, true);
                    logger.debug("creating tombstones.  current count={}", CCRegion.getTombstoneCount());
                    for (int i = 0; i < numEntries; i++) {
                        CCRegion.create("cckey" + i, i);
                        CCRegion.destroy("cckey" + i);
                    }
                    logger.debug("done creating tombstones.  current count={}", CCRegion.getTombstoneCount());
                    checkCCRegionTombstoneCount("after create+destroy in this vm ", numEntries);
                    assertEquals(0, CCRegion.size());
                    afterCreates = 0;
                    AttributesMutator m = CCRegion.getAttributesMutator();
                    m.addCacheListener(new CacheListenerAdapter() {

                        @Override
                        public void afterCreate(EntryEvent event) {
                            afterCreates++;
                        }
                    });
                    if (CCRegion.getScope().isDistributedNoAck()) {
                        // flush the ops
                        sendSerialMessageToAll();
                    }
                } catch (AssertionError e) {
                    // dump diagnostic state before rethrowing the wait failure
                    CCRegion.dumpBackingMap();
                    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("tombstone service state: " + CCRegion.getCache().getTombstoneService());
                    throw e;
                } catch (CacheException e) {
                    fail("while performing create/destroy operations", e);
                }
            }
        });
        vm1.invoke(new SerializableRunnable("check tombstone count and install listener") {

            @Override
            public void run() {
                checkCCRegionTombstoneCount("after create+destroy in other vm ", numEntries);
                afterCreates = 0;
                AttributesMutator m = CCRegion.getAttributesMutator();
                m.addCacheListener(new CacheListenerAdapter() {

                    @Override
                    public void afterCreate(EntryEvent event) {
                        afterCreates++;
                    }
                });
            }
        });
        // Now check to see if tombstones are resurrected by a create.
        // The entries should be created okay and the callback should be afterCreate.
        // The tombstone count won't go down until the entries are swept, but then
        // the count should fall to zero.
        vm0.invoke(new SerializableRunnable("create entries and check afterCreate and tombstone count") {

            @Override
            public void run() {
                try {
                    for (int i = 0; i < numEntries; i++) {
                        CCRegion.create("cckey" + i, i);
                    }
                    checkCCRegionTombstoneCount("after create in this vm", 0);
                    assertEquals("expected " + numEntries + " afterCreates", numEntries, afterCreates);
                    assertEquals(numEntries, CCRegion.size());
                    if (CCRegion.getScope().isDistributedNoAck()) {
                        // flush the ops
                        sendSerialMessageToAll();
                    }
                    WaitCriterion waitForExpiration = new WaitCriterion() {

                        @Override
                        public boolean done() {
                            return CCRegion.getCache().getTombstoneService().getScheduledTombstoneCount() == 0;
                        }

                        @Override
                        public String description() {
                            return "Waiting for all scheduled tombstones to be removed.  There are now " + CCRegion.getCache().getTombstoneService().getScheduledTombstoneCount() + " tombstones left out of " + numEntries + " initial tombstones. " + CCRegion.getCache().getTombstoneService();
                        }
                    };
                    Wait.waitForCriterion(waitForExpiration, TombstoneService.REPLICATE_TOMBSTONE_TIMEOUT * 5, 100, true);
                } catch (CacheException e) {
                    fail("while performing create operations", e);
                }
            }
        });
        vm1.invoke(new SerializableRunnable("check afterCreate and tombstone count") {

            @Override
            public void run() {
                checkCCRegionTombstoneCount("after create in other vm", 0);
                assertEquals("expected " + numEntries + " afterCreates", numEntries, afterCreates);
                assertEquals(numEntries, CCRegion.size());
                WaitCriterion waitForExpiration = new WaitCriterion() {

                    @Override
                    public boolean done() {
                        return CCRegion.getCache().getTombstoneService().getScheduledTombstoneCount() == 0;
                    }

                    @Override
                    public String description() {
                        return "Waiting for all scheduled tombstones to be removed.  There are now " + CCRegion.getCache().getTombstoneService().getScheduledTombstoneCount() + " tombstones left out of " + numEntries + " initial tombstones. " + CCRegion.getCache().getTombstoneService();
                    }
                };
                Wait.waitForCriterion(waitForExpiration, TombstoneService.REPLICATE_TOMBSTONE_TIMEOUT * 5, 100, true);
            }
        });
    } finally {
        // Restore the TombstoneService statics so later tests see the defaults.
        SerializableRunnable resetTimeout = new SerializableRunnable() {

            @Override
            public void run() {
                TombstoneService.REPLICATE_TOMBSTONE_TIMEOUT = oldServerTimeout;
                TombstoneService.NON_REPLICATE_TOMBSTONE_TIMEOUT = oldClientTimeout;
                TombstoneService.EXPIRED_TOMBSTONE_LIMIT = oldExpiredTombstoneLimit;
                TombstoneService.IDLE_EXPIRATION = oldIdleExpiration;
                TombstoneService.GC_MEMORY_THRESHOLD = oldLimit;
                TombstoneService.MAX_SLEEP_TIME = oldMaxSleepTime;
            }
        };
        vm0.invoke(resetTimeout);
        vm1.invoke(resetTimeout);
    }
}
Also used : CacheException(org.apache.geode.cache.CacheException) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) Host(org.apache.geode.test.dunit.Host) WaitCriterion(org.apache.geode.test.dunit.WaitCriterion) RegionFactory(org.apache.geode.cache.RegionFactory) CacheListenerAdapter(org.apache.geode.cache.util.CacheListenerAdapter) VM(org.apache.geode.test.dunit.VM) EntryEvent(org.apache.geode.cache.EntryEvent) AttributesMutator(org.apache.geode.cache.AttributesMutator)

Example 15 with RegionFactory

use of org.apache.geode.cache.RegionFactory in project geode by apache.

In class BackupDUnitTest, method createOverflowRegion.

/**
 * Creates "region3" in the given VM: a synchronous, replicated region backed by
 * a small-oplog disk store, with 1-entry LIFO eviction that overflows to disk.
 *
 * @param vm the dunit VM in which to create the region
 */
protected void createOverflowRegion(final VM vm) {
    SerializableRunnable createRegion = new SerializableRunnable("Create persistent region") {

        @Override
        public void run() {
            Cache cache = getCache();
            // Disk store with a tiny (1 MB) max oplog size so oplogs roll quickly.
            DiskStoreFactory dsf = cache.createDiskStoreFactory();
            dsf.setDiskDirs(getDiskDirs(getUniqueName()));
            dsf.setMaxOplogSize(1);
            DiskStore ds = dsf.create(getUniqueName());
            // Use the cache's factory instead of the deprecated RegionFactory
            // constructor, consistent with the other region creations in this file.
            RegionFactory rf = cache.createRegionFactory();
            rf.setDiskStoreName(ds.getName());
            rf.setDiskSynchronous(true);
            rf.setDataPolicy(DataPolicy.REPLICATE);
            // A 1-entry limit forces virtually every entry to overflow to disk.
            rf.setEvictionAttributes(EvictionAttributes.createLIFOEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK));
            rf.create("region3");
        }
    };
    vm.invoke(createRegion);
}
Also used : DiskStore(org.apache.geode.cache.DiskStore) RegionFactory(org.apache.geode.cache.RegionFactory) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) Cache(org.apache.geode.cache.Cache)

Aggregations

RegionFactory (org.apache.geode.cache.RegionFactory)124 Region (org.apache.geode.cache.Region)63 Cache (org.apache.geode.cache.Cache)57 Test (org.junit.Test)54 DistributedTest (org.apache.geode.test.junit.categories.DistributedTest)51 SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable)44 VM (org.apache.geode.test.dunit.VM)44 PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory)31 FlakyTest (org.apache.geode.test.junit.categories.FlakyTest)31 CacheException (org.apache.geode.cache.CacheException)30 Host (org.apache.geode.test.dunit.Host)28 Properties (java.util.Properties)25 DiskStoreFactory (org.apache.geode.cache.DiskStoreFactory)22 File (java.io.File)21 DiskStore (org.apache.geode.cache.DiskStore)20 CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable)20 ConfigurationProperties (org.apache.geode.distributed.ConfigurationProperties)20 AttributesFactory (org.apache.geode.cache.AttributesFactory)18 PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion)17 AsyncInvocation (org.apache.geode.test.dunit.AsyncInvocation)17