
Example 26 with SubscriptionAttributes

use of org.apache.geode.cache.SubscriptionAttributes in project geode by apache.

the class MultiVMRegionTestCase method testNBRegionInvalidationDuringGetInitialImage.
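The dunit test below is long, but the SubscriptionAttributes usage it exercises comes down to one root-region configuration: a non-replicated (DataPolicy.NORMAL) region that still receives every distributed event because its interest policy is ALL, which is what lets the test use it to flush and synchronize the other members. A minimal sketch of just that configuration, assuming a Cache instance is already available (the method and region names are illustrative):

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.DataPolicy;
import org.apache.geode.cache.InterestPolicy;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.Scope;
import org.apache.geode.cache.SubscriptionAttributes;

// assumes an already-created Cache; "root" is an illustrative region name
static Region createSyncRootRegion(Cache cache) {
    AttributesFactory factory = new AttributesFactory();
    factory.setScope(Scope.DISTRIBUTED_ACK);   // acked distribution, usable as a flush point
    factory.setDataPolicy(DataPolicy.NORMAL);  // no replication of its own
    // InterestPolicy.ALL makes the region receive events for all keys, not just
    // the ones it already holds
    factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
    return cache.createRegion("root", factory.create());
}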

@Ignore("TODO: test is disabled for 51542")
@Test
public void testNBRegionInvalidationDuringGetInitialImage() throws Exception {
    assumeTrue(supportsReplication());
    disconnectAllFromDS();
    // don't run this for noAck, too many race conditions
    if (getRegionAttributes().getScope().isDistributedNoAck())
        return;
    final String name = this.getUniqueName();
    final byte[][] values = new byte[NB1_NUM_ENTRIES][];
    for (int i = 0; i < NB1_NUM_ENTRIES; i++) {
        values[i] = new byte[NB1_VALUE_SIZE];
        Arrays.fill(values[i], (byte) 0x42);
    }
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm2 = host.getVM(2);
    SerializableRunnable create = new CacheSerializableRunnable("Create Mirrored Region") {

        @Override
        public void run2() throws CacheException {
            beginCacheXml();
            {
                // root region must be DACK because it's used to sync up async subregions
                AttributesFactory factory = new AttributesFactory();
                factory.setScope(Scope.DISTRIBUTED_ACK);
                factory.setDataPolicy(DataPolicy.NORMAL);
                factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
                createRootRegion(factory.create());
            }
            {
                AttributesFactory factory = new AttributesFactory(getRegionAttributes());
                factory.setDataPolicy(DataPolicy.REPLICATE);
                createRegion(name, factory.create());
            }
            finishCacheXml(name);
            // reset slow
            org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 0;
        }
    };
    vm0.invoke(new CacheSerializableRunnable("Create Nonmirrored Region") {

        @Override
        public void run2() throws CacheException {
            {
                // root region must be DACK because it's used to sync up async subregions
                AttributesFactory factory = new AttributesFactory();
                factory.setScope(Scope.DISTRIBUTED_ACK);
                factory.setDataPolicy(DataPolicy.EMPTY);
                createRootRegion(factory.create());
            }
            {
                AttributesFactory factory = new AttributesFactory(getRegionAttributes());
                factory.setDataPolicy(DataPolicy.REPLICATE);
                createRegion(name, factory.create());
            }
            // reset slow
            org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 0;
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Put initial data") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            for (int i = 0; i < NB1_NUM_ENTRIES; i++) {
                region.put(new Integer(i), values[i]);
            }
            assertEquals(NB1_NUM_ENTRIES, region.keySet().size());
        }
    });
    // attachDebugger(vm0, "vm0");
    // attachDebugger(vm2, "vm2");
    // start asynchronous process that does updates to the data
    AsyncInvocation async = vm0.invokeAsync(new CacheSerializableRunnable("Do Nonblocking Operations") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            // wait for profile of getInitialImage cache to show up
            final org.apache.geode.internal.cache.CacheDistributionAdvisor adv = ((org.apache.geode.internal.cache.DistributedRegion) region).getCacheDistributionAdvisor();
            final int expectedProfiles = 1;
            WaitCriterion ev = new WaitCriterion() {

                @Override
                public boolean done() {
                    return adv.adviseReplicates().size() >= expectedProfiles;
                }

                @Override
                public String description() {
                    return "profile count never reached " + expectedProfiles;
                }
            };
            Wait.waitForCriterion(ev, 30 * 1000, 200, true);
            // operate on the odd-numbered entries while the getInitialImage is
            // still in progress so that some of these operations are nonblocking
            for (int i = 1; i < NB1_NUM_ENTRIES; i += 2) {
                // at entry 301, invalidate the whole region, then continue as before
                if (i == 301) {
                    // DebuggerSupport.waitForJavaDebugger(getLogWriter(), "About to invalidate region");
                    // wait for previous updates to be processed
                    flushIfNecessary(region);
                    region.invalidateRegion();
                    flushIfNecessary(region);
                }
                Object key = new Integer(i);
                switch (i % 6) {
                    case 1: // UPDATE
                        // use the current timestamp so we know when it happened
                        // we could have used last modification timestamps, but
                        // this works without enabling statistics
                        Object value = new Long(System.currentTimeMillis());
                        region.put(key, value);
                        break;
                    case 3: // INVALIDATE
                        region.invalidate(key);
                        if (getRegionAttributes().getScope().isDistributedAck()) {
                            // do a nonblocking netSearch
                            value = region.get(key);
                            assertNull("Expected null value for key: " + i + " but got " + value, value);
                        }
                        break;
                    case 5: // DESTROY
                        region.destroy(key);
                        if (getRegionAttributes().getScope().isDistributedAck()) {
                            // do a nonblocking netSearch
                            assertNull(region.get(key));
                        }
                        break;
                    default:
                        fail("unexpected modulus result: " + i);
                        break;
                }
            }
            // now do a put on our DACK root region, which will not complete
            // until it is processed on the other side; that means everything
            // done before this point has been processed
            getRootRegion().put("DONE", "FLUSH_OPS");
        }
    });
    // slow down image processing to make it more likely to get async updates
    if (!getRegionAttributes().getScope().isGlobal()) {
        vm2.invoke(new SerializableRunnable("Set slow image processing") {

            @Override
            public void run() {
                // make sure the cache is set up before turning on slow
                // image processing
                getRootRegion();
                // if this is a no_ack test, then we need to slow down more because of the
                // pauses in the nonblocking operations
                int pause = /* getRegionAttributes().getScope().isAck() ? */ 100;
                org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = pause;
            }
        });
    }
    AsyncInvocation asyncGII = vm2.invokeAsync(create);
    if (!getRegionAttributes().getScope().isGlobal()) {
        // wait for nonblocking operations to complete
        try {
            ThreadUtils.join(async, 30 * 1000);
        } finally {
            vm2.invoke(new SerializableRunnable("Set fast image processing") {

                @Override
                public void run() {
                    org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 0;
                }
            });
        }
        org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("after async nonblocking ops complete");
    }
    // wait for GII to complete
    ThreadUtils.join(asyncGII, 30 * 1000);
    final long iiComplete = System.currentTimeMillis();
    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("Complete GetInitialImage at: " + System.currentTimeMillis());
    if (getRegionAttributes().getScope().isGlobal()) {
        // wait for nonblocking operations to complete
        ThreadUtils.join(async, 30 * 1000);
    }
    if (asyncGII.exceptionOccurred()) {
        throw new Error("asyncGII failed", asyncGII.getException());
    }
    if (async.exceptionOccurred()) {
        throw new Error("async failed", async.getException());
    }
    // Locally destroy the region in vm0 so we know that they are not found by
    // a netSearch
    vm0.invoke(new CacheSerializableRunnable("Locally destroy region") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            region.localDestroyRegion();
        }
    });
    // invoke repeating so noack regions wait for all updates to get processed
    vm2.invokeRepeatingIfNecessary(new CacheSerializableRunnable("Verify entryCount") {

        private boolean entriesDumped = false;

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            // expected entry count (subtract entries destroyed)
            int entryCount = NB1_NUM_ENTRIES - NB1_NUM_ENTRIES / 6;
            int actualCount = region.entrySet(false).size();
            if (actualCount == NB1_NUM_ENTRIES) {
                // entries not destroyed, dump entries that were supposed to have been destroyed
                dumpDestroyedEntries(region);
            }
            assertEquals(entryCount, actualCount);
        }

        private void dumpDestroyedEntries(Region region) throws EntryNotFoundException {
            if (entriesDumped)
                return;
            entriesDumped = true;
            LogWriter logger = org.apache.geode.test.dunit.LogWriterUtils.getLogWriter();
            logger.info("DUMPING Entries with values in VM that should have been destroyed:");
            for (int i = 5; i < NB1_NUM_ENTRIES; i += 6) {
                logger.info(i + "-->" + ((org.apache.geode.internal.cache.LocalRegion) region).getValueInVM(new Integer(i)));
            }
        }
    }, 3000);
    vm2.invoke(new CacheSerializableRunnable("Verify keys/values & Nonblocking") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            // expected entry count (subtract entries destroyed)
            int entryCount = NB1_NUM_ENTRIES - NB1_NUM_ENTRIES / 6;
            assertEquals(entryCount, region.entrySet(false).size());
            // determine how many entries were updated before getInitialImage
            // was complete
            int numConcurrent = 0;
            for (int i = 0; i < NB1_NUM_ENTRIES; i++) {
                Region.Entry entry = region.getEntry(new Integer(i));
                if (i < 301) {
                    if (i % 6 == 5) {
                        // destroyed
                        assertNull("Expected entry for " + i + " to be destroyed but it is " + entry, entry);
                    } else {
                        assertNotNull(entry);
                        Object v = entry.getValue();
                        assertNull("Expected value for " + i + " to be null, but was " + v, v);
                    }
                } else {
                    Object v = entry == null ? null : entry.getValue();
                    switch(i % 6) {
                        // even keys are originals
                        case 0:
                        case 2:
                        case 4:
                            assertNotNull(entry);
                            assertNull("Expected value for " + i + " to be null, but was " + v, v);
                            break;
                        case 1: // updated
                            assertNotNull("Expected to find an entry for #" + i, entry);
                            assertNotNull("Expected to find a value for #" + i, v);
                            assertTrue("Value for key " + i + " is not a Long, is a " + v.getClass().getName(), v instanceof Long);
                            Long timestamp = (Long) entry.getValue();
                            if (timestamp.longValue() < iiComplete) {
                                numConcurrent++;
                            }
                            break;
                        case 3: // invalidated
                            assertNotNull("Expected to find an entry for #" + i, entry);
                            assertNull("Expected value for " + i + " to be null, but was " + v, v);
                            break;
                        case 5: // destroyed
                            assertNull("Expected to not find an entry for #" + i, entry);
                            break;
                        default:
                            fail("unexpected modulus result: " + (i % 6));
                            break;
                    }
                }
            }
            org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(name + ": " + numConcurrent + " entries out of " + entryCount + " were updated concurrently with getInitialImage");
            // make sure at least some of them were concurrent
            if (getRegionAttributes().getScope().isGlobal()) {
                assertTrue("Too many concurrent updates when expected to block: " + numConcurrent, numConcurrent < 300);
            } else {
                assertTrue("Not enough updates concurrent with getInitialImage occurred to my liking. " + numConcurrent + " entries out of " + entryCount + " were updated concurrently with getInitialImage, and I'd expect at least 50 or so", numConcurrent >= 30);
            }
        }
    });
}
Also used : CacheException(org.apache.geode.cache.CacheException) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) RegionEntry(org.apache.geode.internal.cache.RegionEntry) AttributesFactory(org.apache.geode.cache.AttributesFactory) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) SubscriptionAttributes(org.apache.geode.cache.SubscriptionAttributes) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) Host(org.apache.geode.test.dunit.Host) WaitCriterion(org.apache.geode.test.dunit.WaitCriterion) LogWriter(org.apache.geode.LogWriter) VM(org.apache.geode.test.dunit.VM) LocalRegion(org.apache.geode.internal.cache.LocalRegion) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) StoredObject(org.apache.geode.internal.offheap.StoredObject) Ignore(org.junit.Ignore) Test(org.junit.Test) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest)

Example 27 with SubscriptionAttributes

use of org.apache.geode.cache.SubscriptionAttributes in project geode by apache.

the class MultiVMRegionTestCase method testEntryTtlLocalDestroy.
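The heart of this test is the combination of entry time-to-live with a LOCAL_DESTROY expiration action on a non-mirrored region that subscribes to all events. A minimal sketch of that attribute combination, assuming an existing Cache; the region name is illustrative, and note that expiration requires statistics to be enabled:

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.DataPolicy;
import org.apache.geode.cache.ExpirationAction;
import org.apache.geode.cache.ExpirationAttributes;
import org.apache.geode.cache.InterestPolicy;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.SubscriptionAttributes;

// illustrative helper; the timeout is in seconds unless the test-only
// LocalRegion.EXPIRY_MS_PROPERTY is set, as the test below does
static Region createTtlRegion(Cache cache) {
    AttributesFactory factory = new AttributesFactory();
    factory.setStatisticsEnabled(true); // expiration needs statistics
    // destroy the entry locally after the time-to-live elapses
    factory.setEntryTimeToLive(new ExpirationAttributes(10, ExpirationAction.LOCAL_DESTROY));
    factory.setDataPolicy(DataPolicy.NORMAL); // not replicated
    // still receive the create performed by the other member
    factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
    return cache.createRegion("ttlExample", factory.create());
}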

/**
   * Tests that an entry in a distributed region expires with a local destroy after a given time to
   * live.
   */
// GEODE-671: time sensitive, expiration, retry loop, async actions, waitForCriterion
@Category(FlakyTest.class)
@Test
public void testEntryTtlLocalDestroy() throws Exception {
    assumeTrue(getRegionAttributes().getPartitionAttributes() == null);
    final boolean mirrored = getRegionAttributes().getDataPolicy().withReplication();
    final boolean partitioned = getRegionAttributes().getPartitionAttributes() != null || getRegionAttributes().getDataPolicy().withPartitioning();
    if (!mirrored) {
        // This test fails intermittently because the DSClock we inherit from the existing
        // distributed system is stuck in the "stopped" state.
        // The DSClock is going away when JGroups is merged, and at that
        // time the following can be removed.
        disconnectAllFromDS();
    }
    final String name = this.getUniqueName();
    final int timeout = 10; // ms
    final Object key = "KEY";
    final Object value = "VALUE";
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    SerializableRunnable create = new CacheSerializableRunnable("Populate") {

        @Override
        public void run2() throws CacheException {
            System.setProperty(LocalRegion.EXPIRY_MS_PROPERTY, "true");
            try {
                Region region = createRegion(name);
            } finally {
                System.getProperties().remove(LocalRegion.EXPIRY_MS_PROPERTY);
            }
        }
    };
    vm1.invoke(create);
    vm0.invoke(new CacheSerializableRunnable("Create with TTL") {

        @Override
        public void run2() throws CacheException {
            AttributesFactory factory = new AttributesFactory(getRegionAttributes());
            factory.setStatisticsEnabled(true);
            ExpirationAttributes expire = new ExpirationAttributes(timeout, ExpirationAction.LOCAL_DESTROY);
            factory.setEntryTimeToLive(expire);
            if (!mirrored) {
                // use InterestPolicy.ALL (plus NORMAL data policy when not partitioned)
                // so that entries created in the other VM will also be created here
                if (!partitioned) {
                    factory.setDataPolicy(DataPolicy.NORMAL);
                }
                factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
                factory.addCacheListener(new CountingDistCacheListener());
            }
            /*
             * Crank up the expiration so the test runs faster. This property only needs to be set
             * while the region is created.
             */
            System.setProperty(LocalRegion.EXPIRY_MS_PROPERTY, "true");
            try {
                createRegion(name, factory.create());
                if (mirrored)
                    fail("Should have thrown an IllegalStateException");
            } catch (IllegalStateException e) {
                if (!mirrored)
                    throw e;
            } finally {
                System.getProperties().remove(LocalRegion.EXPIRY_MS_PROPERTY);
            }
        }
    });
    if (mirrored)
        return;
    vm1.invoke(new SerializableCallable() {

        @Override
        public Object call() throws Exception {
            Region region = getRootRegion().getSubregion(name);
            region.put(key, value);
            return null;
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Check local destroy") {

        @Override
        public void run2() throws CacheException {
            final Region region = getRootRegion().getSubregion(name);
            // make sure we created the entry
            {
                CountingDistCacheListener l = (CountingDistCacheListener) region.getAttributes().getCacheListeners()[0];
                int retry = 1000;
                while (retry-- > 0) {
                    try {
                        l.assertCount(1, 0, 0, 0);
                        // TODO: a race exists in which assertCount may also see a destroyCount of 1
                        logger.info("DEBUG: saw create");
                        break;
                    } catch (AssertionError e) {
                        if (retry > 0) {
                            Wait.pause(1);
                        } else {
                            throw e;
                        }
                    }
                }
            }
            {
                // now make sure it expires
                // this should happen really fast since timeout is 10 ms.
                // But it may take longer in some cases because of thread
                // scheduling delays and machine load (see GEODE-410).
                // The previous code would fail after 100ms; now we wait up to 30 seconds.
                WaitCriterion waitForUpdate = new WaitCriterion() {

                    @Override
                    public boolean done() {
                        Region.Entry re = region.getEntry(key);
                        if (re != null) {
                            EntryExpiryTask eet = getEntryExpiryTask(region, key);
                            if (eet != null) {
                                long stopTime = ((InternalDistributedSystem) (region.getCache().getDistributedSystem())).getClock().getStopTime();
                                logger.info("DEBUG: waiting for expire destroy expirationTime= " + eet.getExpirationTime() + " now=" + eet.getNow() + " stopTime=" + stopTime + " currentTimeMillis=" + System.currentTimeMillis());
                            } else {
                                logger.info("DEBUG: waiting for expire destroy but expiry task is null");
                            }
                        }
                        return re == null;
                    }

                    @Override
                    public String description() {
                        String expiryInfo = "";
                        try {
                            EntryExpiryTask eet = getEntryExpiryTask(region, key);
                            if (eet != null) {
                                expiryInfo = "expirationTime= " + eet.getExpirationTime() + " now=" + eet.getNow() + " currentTimeMillis=" + System.currentTimeMillis();
                            }
                        } catch (EntryNotFoundException ex) {
                            expiryInfo = "EntryNotFoundException when getting expiry task";
                        }
                        return "Entry for key " + key + " never expired (since it still exists) " + expiryInfo;
                    }
                };
                Wait.waitForCriterion(waitForUpdate, 30000, 1, true);
            }
            assertNull(region.getEntry(key));
        }
    });
    vm1.invoke(new CacheSerializableRunnable("Verify local") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            Region.Entry entry = region.getEntry(key);
            assertEquals(value, entry.getValue());
        }
    });
}
Also used : CacheException(org.apache.geode.cache.CacheException) RegionEntry(org.apache.geode.internal.cache.RegionEntry) AttributesFactory(org.apache.geode.cache.AttributesFactory) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) ExpirationAttributes(org.apache.geode.cache.ExpirationAttributes) SubscriptionAttributes(org.apache.geode.cache.SubscriptionAttributes) EntryExpiryTask(org.apache.geode.internal.cache.EntryExpiryTask) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) Host(org.apache.geode.test.dunit.Host) TimeoutException(org.apache.geode.cache.TimeoutException) InvalidDeltaException(org.apache.geode.InvalidDeltaException) IOException(java.io.IOException) EntryExistsException(org.apache.geode.cache.EntryExistsException) CacheWriterException(org.apache.geode.cache.CacheWriterException) IgnoredException(org.apache.geode.test.dunit.IgnoredException) CacheLoaderException(org.apache.geode.cache.CacheLoaderException) WaitCriterion(org.apache.geode.test.dunit.WaitCriterion) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) LocalRegion(org.apache.geode.internal.cache.LocalRegion) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) StoredObject(org.apache.geode.internal.offheap.StoredObject) Category(org.junit.experimental.categories.Category) Test(org.junit.Test) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest)

Example 28 with SubscriptionAttributes

use of org.apache.geode.cache.SubscriptionAttributes in project geode by apache.

the class ProxyDUnitTest method distributedOps.
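The key idea in distributedOps is that a member whose region stores no data of its own (DataPolicy.EMPTY, i.e. a proxy) still distributes every operation it originates, and its SubscriptionAttributes decide which remote events it hears about. A compact sketch of that setup, assuming an existing Cache; the method and region names are illustrative:

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.DataPolicy;
import org.apache.geode.cache.InterestPolicy;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.Scope;
import org.apache.geode.cache.SubscriptionAttributes;

// illustrative proxy-region setup; operations are forwarded but nothing is stored locally
static Region createProxyRegion(Cache cache) {
    AttributesFactory af = new AttributesFactory();
    af.setScope(Scope.DISTRIBUTED_ACK);
    af.setDataPolicy(DataPolicy.EMPTY); // keep no entries locally
    // ALL: deliver every remote event to this member, even though it stores nothing
    af.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
    Region r = cache.createRegion("proxyExample", af.create());
    r.put("putkey", "putvalue1"); // value is stored only on non-proxy members
    return r;
}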

////////////////////// Test Methods //////////////////////
/**
   * Check that distributed ops that originate in a PROXY are correctly distributed to non-proxy regions.
   */
private void distributedOps(DataPolicy dp, InterestPolicy ip) throws CacheException {
    initOtherId();
    AttributesFactory af = new AttributesFactory();
    af.setDataPolicy(dp);
    af.setSubscriptionAttributes(new SubscriptionAttributes(ip));
    af.setScope(Scope.DISTRIBUTED_ACK);
    Region r = createRootRegion("ProxyDUnitTest", af.create());
    doCreateOtherVm();
    r.put("putkey", "putvalue1");
    getOtherVm().invoke(new CacheSerializableRunnable("check put") {

        public void run2() throws CacheException {
            Region r = getRootRegion("ProxyDUnitTest");
            assertEquals(true, r.containsKey("putkey"));
            assertEquals("putvalue1", r.getEntry("putkey").getValue());
            r.put("putkey", "putvalue2");
        }
    });
    assertEquals(false, r.containsKey("putkey"));
    // netsearch
    assertEquals("putvalue2", r.get("putkey"));
    r.invalidate("putkey");
    getOtherVm().invoke(new CacheSerializableRunnable("check invalidate") {

        public void run2() throws CacheException {
            Region r = getRootRegion("ProxyDUnitTest");
            assertEquals(true, r.containsKey("putkey"));
            assertEquals(null, r.getEntry("putkey").getValue());
        }
    });
    // invalid so total miss
    assertEquals(null, r.get("putkey"));
    r.destroy("putkey");
    getOtherVm().invoke(new CacheSerializableRunnable("check destroy") {

        public void run2() throws CacheException {
            Region r = getRootRegion("ProxyDUnitTest");
            assertEquals(false, r.containsKey("putkey"));
        }
    });
    // total miss
    assertEquals(null, r.get("putkey"));
    r.create("createKey", "createValue1");
    getOtherVm().invoke(new CacheSerializableRunnable("check create") {

        public void run2() throws CacheException {
            Region r = getRootRegion("ProxyDUnitTest");
            assertEquals(true, r.containsKey("createKey"));
            assertEquals("createValue1", r.getEntry("createKey").getValue());
        }
    });
    {
        Map m = new HashMap();
        m.put("putAllKey1", "putAllValue1");
        m.put("putAllKey2", "putAllValue2");
        r.putAll(m, "putAllCallback");
    }
    getOtherVm().invoke(new CacheSerializableRunnable("check putAll") {

        public void run2() throws CacheException {
            Region r = getRootRegion("ProxyDUnitTest");
            assertEquals(true, r.containsKey("putAllKey1"));
            assertEquals("putAllValue1", r.getEntry("putAllKey1").getValue());
            assertEquals(true, r.containsKey("putAllKey2"));
            assertEquals("putAllValue2", r.getEntry("putAllKey2").getValue());
        }
    });
    r.clear();
    getOtherVm().invoke(new CacheSerializableRunnable("check clear") {

        public void run2() throws CacheException {
            Region r = getRootRegion("ProxyDUnitTest");
            assertEquals(0, r.size());
        }
    });
    getOtherVm().invoke(new CacheSerializableRunnable("install CacheWriter") {

        public void run2() throws CacheException {
            Region r = getRootRegion("ProxyDUnitTest");
            AttributesMutator am = r.getAttributesMutator();
            CacheWriter cw = new CacheWriterAdapter() {

                public void beforeCreate(EntryEvent event) throws CacheWriterException {
                    throw new CacheWriterException("expected");
                }
            };
            am.setCacheWriter(cw);
        }
    });
    try {
        r.put("putkey", "putvalue");
        fail("expected CacheWriterException");
    } catch (CacheWriterException expected) {
    }
    getOtherVm().invoke(new CacheSerializableRunnable("check clear") {

        public void run2() throws CacheException {
            Region r = getRootRegion("ProxyDUnitTest");
            assertEquals(0, r.size());
        }
    });
    // total miss
    assertEquals(null, r.get("loadkey"));
    getOtherVm().invoke(new CacheSerializableRunnable("install CacheLoader") {

        public void run2() throws CacheException {
            Region r = getRootRegion("ProxyDUnitTest");
            AttributesMutator am = r.getAttributesMutator();
            // clear cache writer
            am.setCacheWriter(null);
            CacheLoader cl = new CacheLoader() {

                public Object load(LoaderHelper helper) throws CacheLoaderException {
                    if (helper.getKey().equals("loadkey")) {
                        return "loadvalue";
                    } else if (helper.getKey().equals("loadexception")) {
                        throw new CacheLoaderException("expected");
                    } else {
                        return null;
                    }
                }

                public void close() {
                }
            };
            am.setCacheLoader(cl);
        }
    });
    // net load
    assertEquals("loadvalue", r.get("loadkey"));
    // total miss
    assertEquals(null, r.get("foobar"));
    try {
        r.get("loadexception");
        fail("expected CacheLoaderException");
    } catch (CacheLoaderException expected) {
    }
    r.destroyRegion();
    getOtherVm().invoke(new CacheSerializableRunnable("check region destroy") {

        public void run2() throws CacheException {
            Region r = getRootRegion("ProxyDUnitTest");
            assertEquals(null, r);
        }
    });
}
Also used : CacheWriterAdapter(org.apache.geode.cache.util.CacheWriterAdapter) CacheException(org.apache.geode.cache.CacheException) HashMap(java.util.HashMap) LoaderHelper(org.apache.geode.cache.LoaderHelper) AttributesFactory(org.apache.geode.cache.AttributesFactory) CacheLoaderException(org.apache.geode.cache.CacheLoaderException) EntryEvent(org.apache.geode.cache.EntryEvent) CacheWriter(org.apache.geode.cache.CacheWriter) Region(org.apache.geode.cache.Region) CacheLoader(org.apache.geode.cache.CacheLoader) Map(java.util.Map) SubscriptionAttributes(org.apache.geode.cache.SubscriptionAttributes) AttributesMutator(org.apache.geode.cache.AttributesMutator) CacheWriterException(org.apache.geode.cache.CacheWriterException)

Example 29 with SubscriptionAttributes

use of org.apache.geode.cache.SubscriptionAttributes in project geode by apache.

the class DistributedTransactionDUnitTest method createRegions.
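Here SubscriptionAttributes is applied to persistent partitioned regions whenever an InterestPolicy is supplied. A compact sketch of just the subscription-related pieces, assuming an existing Cache; the bucket count, redundancy, and names are illustrative, and CACHE_CONTENT is shown simply as the alternative to ALL:

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.DataPolicy;
import org.apache.geode.cache.InterestPolicy;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.SubscriptionAttributes;

// illustrative partitioned-region setup with explicit subscription attributes
static Region createPartitionedRegion(Cache cache) {
    AttributesFactory af = new AttributesFactory();
    af.setDataPolicy(DataPolicy.PARTITION);
    // CACHE_CONTENT (the default) delivers only events for data this member hosts
    af.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.CACHE_CONTENT));
    af.setPartitionAttributes(
        new PartitionAttributesFactory().setTotalNumBuckets(4).setRedundantCopies(1).create());
    return cache.createRegion("customerExample", af.create());
}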

void createRegions(boolean accessor, int redundantCopies, InterestPolicy interestPolicy) {
    AttributesFactory af = new AttributesFactory();
    af.setScope(Scope.DISTRIBUTED_ACK);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    // af.setConcurrencyChecksEnabled(getConcurrencyChecksEnabled());
    getCache().createRegion(D_REFERENCE, af.create());
    af = new AttributesFactory();
    // af.setConcurrencyChecksEnabled(getConcurrencyChecksEnabled());
    af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
    if (interestPolicy != null) {
        af.setSubscriptionAttributes(new SubscriptionAttributes(interestPolicy));
    }
    af.setPartitionAttributes(new PartitionAttributesFactory<CustId, Customer>().setTotalNumBuckets(4).setLocalMaxMemory(accessor ? 0 : 1).setPartitionResolver(new CustomerIDPartitionResolver("resolver1")).setRedundantCopies(redundantCopies).create());
    getCache().createRegion(CUSTOMER_PR, af.create());
    af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
    af.setPartitionAttributes(new PartitionAttributesFactory<OrderId, Order>().setTotalNumBuckets(4).setLocalMaxMemory(accessor ? 0 : 1).setPartitionResolver(new CustomerIDPartitionResolver("resolver2")).setRedundantCopies(redundantCopies).setColocatedWith(CUSTOMER_PR).create());
    getCache().createRegion(ORDER_PR, af.create());
}
Also used : Order(org.apache.geode.internal.cache.execute.data.Order) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) CustomerIDPartitionResolver(org.apache.geode.internal.cache.execute.CustomerIDPartitionResolver) OrderId(org.apache.geode.internal.cache.execute.data.OrderId) SubscriptionAttributes(org.apache.geode.cache.SubscriptionAttributes)

Example 30 with SubscriptionAttributes

use of org.apache.geode.cache.SubscriptionAttributes in project geode by apache.

the class ClientToServerDeltaDUnitTest method createClientCache.
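On the client side, the same InterestPolicy.ALL idea is paired with an EMPTY data policy and a subscription-enabled pool, so the client stores nothing yet still receives server-side events for the keys it registers interest in. A minimal sketch of that pattern, assuming an existing client Cache; the host, port, and names are illustrative placeholders:

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.DataPolicy;
import org.apache.geode.cache.InterestPolicy;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.Scope;
import org.apache.geode.cache.SubscriptionAttributes;
import org.apache.geode.cache.client.Pool;
import org.apache.geode.cache.client.PoolManager;

// illustrative client-side setup; "server-host" and 40404 are placeholder values
static Region createSubscribedClientRegion(Cache cache) {
    Pool pool = PoolManager.createFactory()
        .addServer("server-host", 40404)
        .setSubscriptionEnabled(true) // needed to receive server-to-client events
        .create("deltaExamplePool");
    AttributesFactory factory = new AttributesFactory();
    factory.setScope(Scope.DISTRIBUTED_ACK);
    factory.setDataPolicy(DataPolicy.EMPTY); // keep nothing locally
    factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
    factory.setPoolName(pool.getName());
    Region region = cache.createRegion("deltaExampleRegion", factory.create());
    region.registerInterest("ALL_KEYS"); // ask the server to push all key events
    return region;
}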

/*
   * create client cache
   */
public static void createClientCache(String host, Integer port, Boolean attachListener, Boolean isEmpty, Boolean isCq, String[] cqQueryString, Boolean registerInterestAll, Boolean enableSubscription) throws Exception {
    updates = 0;
    create = 0;
    firstUpdate = null;
    secondUpdate = null;
    error = false;
    lastKeyReceived = false;
    Properties props = new Properties();
    props.setProperty(MCAST_PORT, "0");
    props.setProperty(LOCATORS, "");
    new ClientToServerDeltaDUnitTest().createCache(props);
    pool = (PoolImpl) PoolManager.createFactory().addServer(host, port.intValue()).setThreadLocalConnections(true).setMinConnections(2).setSubscriptionEnabled(enableSubscription).setSubscriptionRedundancy(0).setReadTimeout(10000).setPingInterval(1000).setSocketBufferSize(32768).create("ClientToServerDeltaDunitTestPool");
    AttributesFactory factory = new AttributesFactory();
    factory.setScope(Scope.DISTRIBUTED_ACK);
    factory.setConcurrencyChecksEnabled(true);
    if (isEmpty) {
        factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
        factory.setDataPolicy(DataPolicy.EMPTY);
    } else {
        factory.setDataPolicy(DataPolicy.NORMAL);
    }
    factory.setPoolName(pool.getName());
    factory.setCloningEnabled(false);
    // create the region attributes (EMPTY or NORMAL data policy, as configured above)
    RegionAttributes attrs = factory.create();
    region = cache.createRegion(REGION_NAME, attrs);
    if (attachListener) {
        region.getAttributesMutator().addCacheListener(new CacheListenerAdapter() {

            @Override
            public void afterCreate(EntryEvent event) {
                create++;
                if (LAST_KEY.equals(event.getKey())) {
                    lastKeyReceived = true;
                }
            }

            @Override
            public void afterUpdate(EntryEvent event) {
                switch(updates) {
                    case 0:
                        // first delta
                        validateUpdates(event, firstUpdate, "FIRST");
                        updates++;
                        break;
                    case 1:
                        // combine delta
                        validateUpdates(event, firstUpdate, "FIRST");
                        validateUpdates(event, secondUpdate, "SECOND");
                        updates++;
                        break;
                    default:
                        break;
                }
            }
        });
    }
    if (registerInterestAll) {
        region.registerInterest("ALL_KEYS");
    }
    if (isCq) {
        CqAttributesFactory cqf = new CqAttributesFactory();
        CqListenerAdapter cqlist = new CqListenerAdapter() {

            @Override
            public void onEvent(CqEvent cqEvent) {
                Object key = cqEvent.getKey();
                if (LAST_KEY.equals(key)) {
                    lastKeyReceived = true;
                }
                logger.fine("CQ event received for (key, value): (" + key + ", " + cqEvent.getNewValue() + ")");
            }

            @Override
            public void onError(CqEvent cqEvent) {
                logger.fine("CQ error received for key: " + cqEvent.getKey());
            }
        };
        cqf.addCqListener(cqlist);
        CqAttributes cqa = cqf.create();
        for (int i = 0; i < cqQueryString.length; i++) {
            CqQuery cq = cache.getQueryService().newCq("Delta_Query_" + i, cqQueryString[i], cqa);
            cq.execute();
        }
    }
}
Also used : RegionAttributes(org.apache.geode.cache.RegionAttributes) Properties(java.util.Properties) CqListenerAdapter(org.apache.geode.cache.util.CqListenerAdapter) AttributesFactory(org.apache.geode.cache.AttributesFactory) CqAttributesFactory(org.apache.geode.cache.query.CqAttributesFactory) CacheListenerAdapter(org.apache.geode.cache.util.CacheListenerAdapter) CqEvent(org.apache.geode.cache.query.CqEvent) CqAttributes(org.apache.geode.cache.query.CqAttributes) EntryEvent(org.apache.geode.cache.EntryEvent) CqQuery(org.apache.geode.cache.query.CqQuery) SubscriptionAttributes(org.apache.geode.cache.SubscriptionAttributes)

Aggregations

SubscriptionAttributes (org.apache.geode.cache.SubscriptionAttributes): 35 usages
AttributesFactory (org.apache.geode.cache.AttributesFactory): 24 usages
Region (org.apache.geode.cache.Region): 16 usages
Test (org.junit.Test): 15 usages
CacheException (org.apache.geode.cache.CacheException): 13 usages
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable): 12 usages
VM (org.apache.geode.test.dunit.VM): 11 usages
EntryEvent (org.apache.geode.cache.EntryEvent): 10 usages
Properties (java.util.Properties): 9 usages
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 9 usages
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory): 8 usages
ConfigurationProperties (org.apache.geode.distributed.ConfigurationProperties): 7 usages
PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion): 7 usages
Host (org.apache.geode.test.dunit.Host): 7 usages
WaitCriterion (org.apache.geode.test.dunit.WaitCriterion): 7 usages
FlakyTest (org.apache.geode.test.junit.categories.FlakyTest): 7 usages
RegionAttributes (org.apache.geode.cache.RegionAttributes): 6 usages
CacheListenerAdapter (org.apache.geode.cache.util.CacheListenerAdapter): 6 usages
StoredObject (org.apache.geode.internal.offheap.StoredObject): 6 usages
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 6 usages