
Example 6 with SubscriptionAttributes

use of org.apache.geode.cache.SubscriptionAttributes in project geode by apache.

the class CacheXmlGenerator method generate.

/**
   * Generates XML for region attributes.
   *
   * @param id The id of the named region attributes (may be <code>null</code>)
   */
private void generate(String id, RegionAttributes attrs) throws SAXException {
    AttributesImpl atts = new AttributesImpl();
    if (id != null) {
        atts.addAttribute("", "", ID, "", id);
    }
    // Preserve the refid only for RegionAttributesCreation; for runtime RegionAttributes, by this point, the refid information is lost.
    if (attrs instanceof RegionAttributesCreation) {
        String refId = ((RegionAttributesCreation) attrs).getRefid();
        if (refId != null) {
            atts.addAttribute("", "", REFID, "", refId);
        }
    }
    if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasScope())) {
        String scopeString;
        Scope scope = attrs.getScope();
        if (scope.equals(Scope.LOCAL)) {
            scopeString = LOCAL;
        } else if (scope.equals(Scope.DISTRIBUTED_NO_ACK)) {
            scopeString = DISTRIBUTED_NO_ACK;
        } else if (scope.equals(Scope.DISTRIBUTED_ACK)) {
            scopeString = DISTRIBUTED_ACK;
        } else if (scope.equals(Scope.GLOBAL)) {
            scopeString = GLOBAL;
        } else {
            throw new InternalGemFireException(LocalizedStrings.CacheXmlGenerator_UNKNOWN_SCOPE_0.toLocalizedString(scope));
        }
        final boolean isPartitionedRegion;
        if (attrs instanceof RegionAttributesCreation) {
            RegionAttributesCreation rac = (RegionAttributesCreation) attrs;
            isPartitionedRegion = rac.getPartitionAttributes() != null || (rac.hasDataPolicy() && rac.getDataPolicy().withPartitioning());
        } else {
            isPartitionedRegion = attrs.getPartitionAttributes() != null || attrs.getDataPolicy().withPartitioning();
        }
        if (!isPartitionedRegion) {
            // Partitioned Regions don't support setting scope
            if (generateDefaults() || !scope.equals(AbstractRegion.DEFAULT_SCOPE))
                atts.addAttribute("", "", SCOPE, "", scopeString);
        }
    }
    if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasEarlyAck())) {
        if (generateDefaults() || attrs.getEarlyAck())
            atts.addAttribute("", "", EARLY_ACK, "", String.valueOf(attrs.getEarlyAck()));
    }
    if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasMulticastEnabled())) {
        if (generateDefaults() || attrs.getMulticastEnabled())
            atts.addAttribute("", "", MULTICAST_ENABLED, "", String.valueOf(attrs.getMulticastEnabled()));
    }
    if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasPublisher())) {
        if (generateDefaults() || attrs.getPublisher())
            atts.addAttribute("", "", PUBLISHER, "", String.valueOf(attrs.getPublisher()));
    }
    if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasEnableAsyncConflation())) {
        if (generateDefaults() || attrs.getEnableAsyncConflation())
            atts.addAttribute("", "", ENABLE_ASYNC_CONFLATION, "", String.valueOf(attrs.getEnableAsyncConflation()));
    }
    if (this.version.compareTo(CacheXmlVersion.GEMFIRE_5_0) >= 0) {
        if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasEnableSubscriptionConflation())) {
            if (this.version.compareTo(CacheXmlVersion.GEMFIRE_5_7) >= 0) {
                // starting with 5.7 it is enable-subscription-conflation
                if (generateDefaults() || attrs.getEnableSubscriptionConflation())
                    atts.addAttribute("", "", ENABLE_SUBSCRIPTION_CONFLATION, "", String.valueOf(attrs.getEnableSubscriptionConflation()));
            } else {
                // before 5.7 it was enable-bridge-conflation
                if (generateDefaults() || attrs.getEnableSubscriptionConflation())
                    atts.addAttribute("", "", ENABLE_BRIDGE_CONFLATION, "", String.valueOf(attrs.getEnableSubscriptionConflation()));
            }
        }
        if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasDataPolicy())) {
            String dpString;
            DataPolicy dp = attrs.getDataPolicy();
            if (dp.isEmpty()) {
                dpString = EMPTY_DP;
            } else if (dp.isNormal()) {
                dpString = NORMAL_DP;
            } else if (dp.isPreloaded()) {
                dpString = PRELOADED_DP;
            } else if (dp.isReplicate()) {
                dpString = REPLICATE_DP;
            } else if (dp == DataPolicy.PERSISTENT_REPLICATE) {
                dpString = PERSISTENT_REPLICATE_DP;
            } else if (dp == DataPolicy.PERSISTENT_PARTITION) {
                dpString = PERSISTENT_PARTITION_DP;
            } else if (dp.isPartition()) {
                if (this.version.compareTo(CacheXmlVersion.GEMFIRE_5_1) >= 0) {
                    dpString = PARTITION_DP;
                } else {
                    // prior to 5.1 the data policy for partitioned regions was EMPTY
                    dpString = EMPTY_DP;
                }
            } else {
                throw new InternalGemFireException(LocalizedStrings.CacheXmlGenerator_UNKNOWN_DATA_POLICY_0.toLocalizedString(dp));
            }
            if (generateDefaults() || !dp.equals(DataPolicy.DEFAULT))
                atts.addAttribute("", "", DATA_POLICY, "", dpString);
        }
    // hasDataPolicy
    } else {
        // this.version < GEMFIRE_5_0
        if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasEnableSubscriptionConflation())) {
            if (generateDefaults() || attrs.getEnableSubscriptionConflation())
                atts.addAttribute("", "", "enable-conflation", "", String.valueOf(attrs.getEnableSubscriptionConflation()));
        }
        if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasMirrorType())) {
            String mirrorString;
            MirrorType mirror = attrs.getMirrorType();
            if (mirror.equals(MirrorType.NONE))
                mirrorString = NONE;
            else if (mirror.equals(MirrorType.KEYS))
                mirrorString = KEYS;
            else if (mirror.equals(MirrorType.KEYS_VALUES))
                mirrorString = KEYS_VALUES;
            else
                throw new InternalGemFireException(LocalizedStrings.CacheXmlGenerator_UNKNOWN_MIRROR_TYPE_0.toLocalizedString(mirror));
            atts.addAttribute("", "", MIRROR_TYPE, "", mirrorString);
        }
        if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasPersistBackup())) {
            atts.addAttribute("", "", PERSIST_BACKUP, "", String.valueOf(attrs.getDataPolicy() == DataPolicy.PERSISTENT_REPLICATE));
        }
    }
    if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasInitialCapacity())) {
        if (generateDefaults() || attrs.getInitialCapacity() != 16)
            atts.addAttribute("", "", INITIAL_CAPACITY, "", String.valueOf(attrs.getInitialCapacity()));
    }
    if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasLoadFactor())) {
        if (generateDefaults() || attrs.getLoadFactor() != 0.75f)
            atts.addAttribute("", "", LOAD_FACTOR, "", String.valueOf(attrs.getLoadFactor()));
    }
    if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasConcurrencyLevel())) {
        if (generateDefaults() || attrs.getConcurrencyLevel() != 16)
            atts.addAttribute("", "", CONCURRENCY_LEVEL, "", String.valueOf(attrs.getConcurrencyLevel()));
    }
    if (this.version.compareTo(CacheXmlVersion.GEMFIRE_7_0) >= 0) {
        if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasConcurrencyChecksEnabled())) {
            if (generateDefaults() || !attrs.getConcurrencyChecksEnabled()) /* fixes bug 46654 */
                atts.addAttribute("", "", CONCURRENCY_CHECKS_ENABLED, "", String.valueOf(attrs.getConcurrencyChecksEnabled()));
        }
    }
    if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasStatisticsEnabled())) {
        if (generateDefaults() || attrs.getStatisticsEnabled())
            atts.addAttribute("", "", STATISTICS_ENABLED, "", String.valueOf(attrs.getStatisticsEnabled()));
    }
    if (!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasIgnoreJTA()) {
        if (generateDefaults() || attrs.getIgnoreJTA())
            atts.addAttribute("", "", IGNORE_JTA, "", String.valueOf(attrs.getIgnoreJTA()));
    }
    if (this.version.compareTo(CacheXmlVersion.GEMFIRE_4_0) >= 0) {
        if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasIsLockGrantor())) {
            if (generateDefaults() || attrs.isLockGrantor())
                atts.addAttribute("", "", IS_LOCK_GRANTOR, "", String.valueOf(attrs.isLockGrantor()));
        }
    }
    if (this.version.compareTo(CacheXmlVersion.GEMFIRE_5_7) >= 0) {
        if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasPoolName())) {
            String cpVal = attrs.getPoolName();
            if (cpVal == null) {
                cpVal = "";
            }
            if (generateDefaults() || !cpVal.equals(""))
                atts.addAttribute("", "", POOL_NAME, "", cpVal);
        }
    }
    if (this.version.compareTo(CacheXmlVersion.GEMFIRE_6_5) >= 0) {
        if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasDiskStoreName())) {
            String dsVal = attrs.getDiskStoreName();
            if (dsVal != null) {
                atts.addAttribute("", "", DISK_STORE_NAME, "", dsVal);
            }
        }
        if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasDiskSynchronous())) {
            if (generateDefaults() || attrs.isDiskSynchronous() != AttributesFactory.DEFAULT_DISK_SYNCHRONOUS)
                atts.addAttribute("", "", DISK_SYNCHRONOUS, "", String.valueOf(attrs.isDiskSynchronous()));
        }
    }
    if (this.version.compareTo(CacheXmlVersion.GEMFIRE_6_1) >= 0)
        if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasCloningEnabled())) {
            if (generateDefaults() || attrs.getCloningEnabled())
                atts.addAttribute("", "", CLONING_ENABLED, "", String.valueOf(attrs.getCloningEnabled()));
        }
    if (this.version.compareTo(CacheXmlVersion.GEMFIRE_7_0) >= 0) {
        if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasGatewaySenderId())) {
            Set<String> senderIds = new HashSet<String>(attrs.getGatewaySenderIds());
            StringBuilder senderStringBuff = new StringBuilder();
            if (senderIds != null && senderIds.size() != 0) {
                for (String senderId : senderIds) {
                    if (!(senderStringBuff.length() == 0)) {
                        senderStringBuff.append(",");
                    }
                    senderStringBuff.append(senderId);
                }
            }
            if (generateDefaults() || senderStringBuff.length() > 0)
                atts.addAttribute("", "", GATEWAY_SENDER_IDS, "", senderStringBuff.toString());
        }
    }
    if (this.version.compareTo(CacheXmlVersion.GEMFIRE_7_0) >= 0) {
        if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasAsyncEventListeners())) {
            Set<String> asyncEventQueueIds = new HashSet<String>(attrs.getAsyncEventQueueIds());
            StringBuilder asyncEventQueueStringBuff = new StringBuilder();
            if (asyncEventQueueIds != null && asyncEventQueueIds.size() != 0) {
                for (String asyncEventQueueId : asyncEventQueueIds) {
                    if (!(asyncEventQueueStringBuff.length() == 0)) {
                        asyncEventQueueStringBuff.append(",");
                    }
                    asyncEventQueueStringBuff.append(asyncEventQueueId);
                }
            }
            if (generateDefaults() || asyncEventQueueStringBuff.length() > 0)
                atts.addAttribute("", "", ASYNC_EVENT_QUEUE_IDS, "", asyncEventQueueStringBuff.toString());
        }
    }
    if (this.version.compareTo(CacheXmlVersion.GEODE_1_0) >= 0) {
        if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasOffHeap())) {
            if (generateDefaults() || attrs.getOffHeap()) {
                atts.addAttribute("", "", OFF_HEAP, "", String.valueOf(attrs.getOffHeap()));
            }
        }
    }
    handler.startElement("", REGION_ATTRIBUTES, REGION_ATTRIBUTES, atts);
    if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasKeyConstraint())) {
        generate(attrs.getKeyConstraint(), KEY_CONSTRAINT);
    }
    if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasValueConstraint())) {
        generate(attrs.getValueConstraint(), VALUE_CONSTRAINT);
    }
    if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasRegionTimeToLive())) {
        if (generateDefaults() || !attrs.getRegionTimeToLive().equals(ExpirationAttributes.DEFAULT))
            generate(REGION_TIME_TO_LIVE, attrs.getRegionTimeToLive(), null);
    }
    if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasRegionIdleTimeout())) {
        if (generateDefaults() || !attrs.getRegionIdleTimeout().equals(ExpirationAttributes.DEFAULT))
            generate(REGION_IDLE_TIME, attrs.getRegionIdleTimeout(), null);
    }
    if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasEntryTimeToLive() || ((RegionAttributesCreation) attrs).hasCustomEntryTimeToLive())) {
        if (generateDefaults() || !attrs.getEntryTimeToLive().equals(ExpirationAttributes.DEFAULT) || attrs.getCustomEntryTimeToLive() != null)
            generate(ENTRY_TIME_TO_LIVE, attrs.getEntryTimeToLive(), attrs.getCustomEntryTimeToLive());
    }
    if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasEntryIdleTimeout() || ((RegionAttributesCreation) attrs).hasCustomEntryIdleTimeout())) {
        if (generateDefaults() || !attrs.getEntryIdleTimeout().equals(ExpirationAttributes.DEFAULT) || attrs.getCustomEntryIdleTimeout() != null)
            generate(ENTRY_IDLE_TIME, attrs.getEntryIdleTimeout(), attrs.getCustomEntryIdleTimeout());
    }
    if (attrs.getDiskStoreName() == null && (generateDefaults() || this.version.compareTo(CacheXmlVersion.GEMFIRE_6_5) < 0)) {
        if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasDiskWriteAttributes())) {
            generate(attrs.getDiskWriteAttributes());
        }
        if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasDiskDirs())) {
            File[] diskDirs = attrs.getDiskDirs();
            int[] diskSizes = attrs.getDiskDirSizes();
            if (diskDirs != null && diskDirs.length > 0) {
                handler.startElement("", DISK_DIRS, DISK_DIRS, EMPTY);
                for (int i = 0; i < diskDirs.length; i++) {
                    AttributesImpl diskAtts = new AttributesImpl();
                    if (diskSizes[i] != DiskStoreFactory.DEFAULT_DISK_DIR_SIZE) {
                        diskAtts.addAttribute("", "", DIR_SIZE, "", String.valueOf(diskSizes[i]));
                    }
                    handler.startElement("", DISK_DIR, DISK_DIR, diskAtts);
                    File dir = diskDirs[i];
                    String name = generateDefaults() ? dir.getAbsolutePath() : dir.getPath();
                    handler.characters(name.toCharArray(), 0, name.length());
                    handler.endElement("", DISK_DIR, DISK_DIR);
                }
                handler.endElement("", DISK_DIRS, DISK_DIRS);
            }
        }
    }
    if (this.version.compareTo(CacheXmlVersion.GEMFIRE_5_0) >= 0) {
        if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasPartitionAttributes())) {
            PartitionAttributes p = attrs.getPartitionAttributes();
            if (p != null) {
                generate(p);
            }
        }
    }
    if (this.version.compareTo(CacheXmlVersion.GEMFIRE_5_0) >= 0) {
        MembershipAttributes p = attrs.getMembershipAttributes();
        if (p != null && p.hasRequiredRoles()) {
            generate(p);
        }
    }
    if (this.version.compareTo(CacheXmlVersion.GEMFIRE_5_0) >= 0) {
        if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasSubscriptionAttributes())) {
            SubscriptionAttributes sa = attrs.getSubscriptionAttributes();
            if (sa != null) {
                if (generateDefaults() || !sa.equals(new SubscriptionAttributes()))
                    generate(sa);
            }
        }
    }
    if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasCacheLoader())) {
        generate(CACHE_LOADER, attrs.getCacheLoader());
    }
    if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasCacheWriter())) {
        generate(CACHE_WRITER, attrs.getCacheWriter());
    }
    if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasCacheListeners())) {
        CacheListener[] listeners = attrs.getCacheListeners();
        for (int i = 0; i < listeners.length; i++) {
            generate(CACHE_LISTENER, listeners[i]);
        }
    }
    if (this.version.compareTo(CacheXmlVersion.GEMFIRE_8_0) >= 0) {
        if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasCompressor())) {
            generate(COMPRESSOR, attrs.getCompressor());
        }
    }
    if ((!(attrs instanceof RegionAttributesCreation) || ((RegionAttributesCreation) attrs).hasEvictionAttributes())) {
        generate(attrs.getEvictionAttributes());
    }
    handler.endElement("", REGION_ATTRIBUTES, REGION_ATTRIBUTES);
}
Also used : InternalGemFireException(org.apache.geode.InternalGemFireException) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) FixedPartitionAttributes(org.apache.geode.cache.FixedPartitionAttributes) CacheListener(org.apache.geode.cache.CacheListener) DiskWriteAttributesImpl(org.apache.geode.internal.cache.DiskWriteAttributesImpl) PartitionAttributesImpl(org.apache.geode.internal.cache.PartitionAttributesImpl) AttributesImpl(org.xml.sax.helpers.AttributesImpl) Scope(org.apache.geode.cache.Scope) MirrorType(org.apache.geode.cache.MirrorType) DataPolicy(org.apache.geode.cache.DataPolicy) File(java.io.File) HashSet(java.util.HashSet) MembershipAttributes(org.apache.geode.cache.MembershipAttributes) SubscriptionAttributes(org.apache.geode.cache.SubscriptionAttributes)
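
For context, a minimal sketch (not part of the example above; the class name is illustrative) of the default-equality check that gates the subscription output near the end of generate(): the region's SubscriptionAttributes are only emitted when they differ from a default-constructed instance, or when generateDefaults() is true. The resulting cache-XML element is assumed to be subscription-attributes with an interest-policy attribute, per Geode's cache XML conventions.

import org.apache.geode.cache.InterestPolicy;
import org.apache.geode.cache.SubscriptionAttributes;

public class SubscriptionAttributesDefaultCheck {
    public static void main(String[] args) {
        // Default-constructed attributes carry the default InterestPolicy, so the
        // generator treats them as "nothing to emit" unless generateDefaults() is on.
        SubscriptionAttributes defaults = new SubscriptionAttributes();
        SubscriptionAttributes all = new SubscriptionAttributes(InterestPolicy.ALL);
        System.out.println(defaults.equals(new SubscriptionAttributes())); // true  -> element skipped
        System.out.println(all.equals(new SubscriptionAttributes()));      // false -> element generated
    }
}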

Example 7 with SubscriptionAttributes

use of org.apache.geode.cache.SubscriptionAttributes in project geode by apache.

the class MultiVMRegionTestCase method testNonblockingGetInitialImage.

/**
   * Tests that distributed ack operations do not block while another cache is doing a
   * getInitialImage.
   */
@Test
public void testNonblockingGetInitialImage() throws Exception {
    assumeTrue(supportsReplication());
    // don't run this test with global scope since it's too difficult to predict
    // how many concurrent operations will occur
    assumeFalse(getRegionAttributes().getScope().isGlobal());
    final String name = this.getUniqueName();
    final byte[][] values = new byte[NB1_NUM_ENTRIES][];
    for (int i = 0; i < NB1_NUM_ENTRIES; i++) {
        values[i] = new byte[NB1_VALUE_SIZE];
        Arrays.fill(values[i], (byte) 0x42);
    }
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm2 = host.getVM(2);
    SerializableRunnable create = new CacheSerializableRunnable("Create Mirrored Region") {

        @Override
        public void run2() throws CacheException {
            beginCacheXml();
            {
                // root region must be DACK because it's used to sync up async subregions
                AttributesFactory factory = new AttributesFactory();
                factory.setScope(Scope.DISTRIBUTED_ACK);
                factory.setDataPolicy(DataPolicy.NORMAL);
                factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("MJT DEBUG: attrs0 are " + factory.create());
                createRootRegion(factory.create());
            }
            {
                AttributesFactory factory = new AttributesFactory(getRegionAttributes());
                factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
                if (getRegionAttributes().getDataPolicy() == DataPolicy.NORMAL) {
                    factory.setDataPolicy(DataPolicy.PRELOADED);
                }
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("MJT DEBUG: attrs1 are " + factory.create());
                Region region = createRegion(name, factory.create());
            }
            finishCacheXml(name);
            // reset slow
            org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 0;
        }
    };
    vm0.invoke(new CacheSerializableRunnable("Create Nonmirrored Region") {

        @Override
        public void run2() throws CacheException {
            {
                // root region must be DACK because it's used to sync up async subregions
                AttributesFactory factory = new AttributesFactory();
                factory.setScope(Scope.DISTRIBUTED_ACK);
                factory.setDataPolicy(DataPolicy.EMPTY);
                createRootRegion(factory.create());
            }
            {
                AttributesFactory factory = new AttributesFactory(getRegionAttributes());
                createRegion(name, factory.create());
            }
            // reset slow
            org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 0;
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Put initial data") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            for (int i = 0; i < NB1_NUM_ENTRIES; i++) {
                region.put(new Integer(i), values[i]);
            }
            assertEquals(NB1_NUM_ENTRIES, region.keySet().size());
        }
    });
    // start asynchronous process that does updates to the data
    AsyncInvocation async = vm0.invokeAsync(new CacheSerializableRunnable("Do Nonblocking Operations") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            // wait for profile of getInitialImage cache to show up
            final org.apache.geode.internal.cache.CacheDistributionAdvisor adv = ((org.apache.geode.internal.cache.DistributedRegion) region).getCacheDistributionAdvisor();
            final int expectedProfiles = 1;
            WaitCriterion ev = new WaitCriterion() {

                @Override
                public boolean done() {
                    DataPolicy currentPolicy = getRegionAttributes().getDataPolicy();
                    if (currentPolicy == DataPolicy.PRELOADED) {
                        return (adv.advisePreloadeds().size() + adv.adviseReplicates().size()) >= expectedProfiles;
                    } else {
                        return adv.adviseReplicates().size() >= expectedProfiles;
                    }
                }

                @Override
                public String description() {
                    return "replicate count never reached " + expectedProfiles;
                }
            };
            Wait.waitForCriterion(ev, 60 * 1000, 200, true);
            DataPolicy currentPolicy = getRegionAttributes().getDataPolicy();
            int numProfiles = 0;
            if (currentPolicy == DataPolicy.PRELOADED) {
                numProfiles = adv.advisePreloadeds().size() + adv.adviseReplicates().size();
            } else {
                numProfiles = adv.adviseReplicates().size();
            }
            assertTrue(numProfiles >= expectedProfiles);
            // operate on the odd-numbered entries (update, invalidate, destroy) before the get initial image is complete.
            for (int i = 1; i < NB1_NUM_ENTRIES; i += 2) {
                Object key = new Integer(i);
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("Operation #" + i + " on key " + key);
                switch(i % 6) {
                    case 1: // UPDATE
                        // use the current timestamp so we know when it happened
                        // we could have used last modification timestamps, but
                        // this works without enabling statistics
                        Object value = new Long(System.currentTimeMillis());
                        region.put(key, value);
                        break;
                    case 3: // INVALIDATE
                        region.invalidate(key);
                        if (getRegionAttributes().getScope().isDistributedAck()) {
                            // do a nonblocking netSearch
                            assertNull(region.get(key));
                        }
                        break;
                    case 5: // DESTROY
                        region.destroy(key);
                        if (getRegionAttributes().getScope().isDistributedAck()) {
                            // do a nonblocking netSearch
                            assertNull(region.get(key));
                        }
                        break;
                    default:
                        fail("unexpected modulus result: " + i);
                        break;
                }
            }
            // add some new keys
            for (int i = NB1_NUM_ENTRIES; i < NB1_NUM_ENTRIES + 200; i++) {
                region.create(new Integer(i), new Long(System.currentTimeMillis()));
            }
            // now do a put on our DACK root region, which will not complete
            // until processed on the other side, which means everything done before this
            // point has been processed
            getRootRegion().put("DONE", "FLUSH_OPS");
        }
    });
    // slow down image processing to make it more likely to get async updates
    if (!getRegionAttributes().getScope().isGlobal()) {
        vm2.invoke(new SerializableRunnable("Set slow image processing") {

            @Override
            public void run() {
                // if this is a no_ack test, then we need to slow down more because of the
                // pauses in the nonblocking operations
                int pause = 200;
                org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = pause;
            }
        });
    }
    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("Before GetInitialImage, data policy is " + getRegionAttributes().getDataPolicy() + ", scope is " + getRegionAttributes().getScope());
    AsyncInvocation asyncGII = vm2.invokeAsync(create);
    if (!getRegionAttributes().getScope().isGlobal()) {
        // wait for nonblocking operations to complete
        ThreadUtils.join(async, 30 * 1000);
        vm2.invoke(new SerializableRunnable("Set fast image processing") {

            @Override
            public void run() {
                org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 0;
            }
        });
        org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("after async nonblocking ops complete");
    }
    // wait for GII to complete
    ThreadUtils.join(asyncGII, 30 * 1000);
    final long iiComplete = System.currentTimeMillis();
    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("Complete GetInitialImage at: " + System.currentTimeMillis());
    if (getRegionAttributes().getScope().isGlobal()) {
        // wait for nonblocking operations to complete
        ThreadUtils.join(async, 30 * 1000);
    }
    if (async.exceptionOccurred()) {
        fail("async failed", async.getException());
    }
    if (asyncGII.exceptionOccurred()) {
        fail("asyncGII failed", asyncGII.getException());
    }
    // Locally destroy the region in vm0 so we know that the entries are not found by
    // a netSearch
    vm0.invoke(new CacheSerializableRunnable("Locally destroy region") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            region.localDestroyRegion();
        }
    });
    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("after localDestroyRegion");
    // invoke repeating so noack regions wait for all updates to get processed
    vm2.invokeRepeatingIfNecessary(new CacheSerializableRunnable("Verify entryCount") {

        boolean entriesDumped = false;

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            // expected entry count (subtract entries destroyed)
            int entryCount = NB1_NUM_ENTRIES + 200 - NB1_NUM_ENTRIES / 6;
            int actualCount = region.entrySet(false).size();
            if (actualCount == NB1_NUM_ENTRIES + 200) {
                // entries not destroyed, dump entries that were supposed to have been destroyed
                dumpDestroyedEntries(region);
            }
            assertEquals(entryCount, actualCount);
        }

        private void dumpDestroyedEntries(Region region) throws EntryNotFoundException {
            if (entriesDumped)
                return;
            entriesDumped = true;
            LogWriter logger = org.apache.geode.test.dunit.LogWriterUtils.getLogWriter();
            logger.info("DUMPING Entries with values in VM that should have been destroyed:");
            for (int i = 5; i < NB1_NUM_ENTRIES; i += 6) {
                try {
                    logger.info(i + "-->" + ((org.apache.geode.internal.cache.LocalRegion) region).getValueInVM(new Integer(i)));
                } catch (EntryNotFoundException expected) {
                    logger.info(i + "-->" + "CORRECTLY DESTROYED");
                }
            }
        }
    }, 5000);
    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("after verify entryCount");
    vm2.invoke(new CacheSerializableRunnable("Verify keys/values & Nonblocking") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            // expected entry count (subtract entries destroyed)
            int entryCount = NB1_NUM_ENTRIES + 200 - NB1_NUM_ENTRIES / 6;
            assertEquals(entryCount, region.entrySet(false).size());
            // determine how many entries were updated before getInitialImage
            // was complete
            int numConcurrent = 0;
            for (int i = 0; i < NB1_NUM_ENTRIES + 200; i++) {
                Region.Entry entry = region.getEntry(new Integer(i));
                Object v = entry == null ? null : entry.getValue();
                if (i < NB1_NUM_ENTRIES) {
                    // old keys
                    switch(i % 6) {
                        // even keys are originals
                        case 0:
                        case 2:
                        case 4:
                            assertNotNull(entry);
                            assertTrue(Arrays.equals(values[i], (byte[]) v));
                            break;
                        case 1: // updated
                            assertNotNull(v);
                            assertTrue("Value for key " + i + " is not a Long, is a " + v.getClass().getName(), v instanceof Long);
                            Long timestamp = (Long) entry.getValue();
                            if (timestamp.longValue() < iiComplete) {
                                numConcurrent++;
                            }
                            break;
                        case 3: // invalidated
                            assertNotNull(entry);
                            assertNull("Expected value for " + i + " to be null, but was " + v, v);
                            break;
                        case 5: // destroyed
                            assertNull(entry);
                            break;
                        default:
                            fail("unexpected modulus result: " + (i % 6));
                            break;
                    }
                } else {
                    // new keys
                    assertNotNull(v);
                    assertTrue("Value for key " + i + " is not a Long, is a " + v.getClass().getName(), v instanceof Long);
                    Long timestamp = (Long) entry.getValue();
                    if (timestamp.longValue() < iiComplete) {
                        numConcurrent++;
                    }
                }
            }
            org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(name + ": " + numConcurrent + " entries out of " + entryCount + " were updated concurrently with getInitialImage");
            // make sure at least some of them were concurrent
            if (region.getAttributes().getScope().isGlobal()) {
                assertTrue("Too many concurrent updates when expected to block: " + numConcurrent, numConcurrent < 10);
            } else {
                int min = 30;
                assertTrue("Not enough updates concurrent with getInitialImage occurred to my liking. " + numConcurrent + " entries out of " + entryCount + " were updated concurrently with getInitialImage, and I'd expect at least " + min + " or so", numConcurrent >= min);
            }
        }
    });
    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("after verify key/values");
}
Also used : CacheException(org.apache.geode.cache.CacheException) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) RegionEntry(org.apache.geode.internal.cache.RegionEntry) AttributesFactory(org.apache.geode.cache.AttributesFactory) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) DataPolicy(org.apache.geode.cache.DataPolicy) SubscriptionAttributes(org.apache.geode.cache.SubscriptionAttributes) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) Host(org.apache.geode.test.dunit.Host) WaitCriterion(org.apache.geode.test.dunit.WaitCriterion) LogWriter(org.apache.geode.LogWriter) VM(org.apache.geode.test.dunit.VM) LocalRegion(org.apache.geode.internal.cache.LocalRegion) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) StoredObject(org.apache.geode.internal.offheap.StoredObject) Test(org.junit.Test) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest)
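
A brief, hedged note on why the mirrored-region creator above pairs DataPolicy.NORMAL with InterestPolicy.ALL: per Geode's InterestPolicy semantics, the default policy (CACHE_CONTENT) only processes remote events for entries the member already holds, while ALL processes every event, which a non-replicated region needs in order to observe the concurrent operations. A minimal, self-contained sketch of that attribute combination follows; the class name is illustrative and not taken from the test.

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.DataPolicy;
import org.apache.geode.cache.InterestPolicy;
import org.apache.geode.cache.RegionAttributes;
import org.apache.geode.cache.Scope;
import org.apache.geode.cache.SubscriptionAttributes;

public class AllEventsSubscriberSketch {
    public static void main(String[] args) {
        AttributesFactory factory = new AttributesFactory();
        // Distributed-ack scope plus a non-replicated data policy...
        factory.setScope(Scope.DISTRIBUTED_ACK);
        factory.setDataPolicy(DataPolicy.NORMAL);
        // ...subscribed to all events rather than only to entries it already has.
        factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
        RegionAttributes attrs = factory.create();
        System.out.println(attrs.getSubscriptionAttributes().getInterestPolicy());
    }
}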

Example 8 with SubscriptionAttributes

use of org.apache.geode.cache.SubscriptionAttributes in project geode by apache.

the class MultiVMRegionTestCase method testNBRegionDestructionDuringGetInitialImage.

@Test
public void testNBRegionDestructionDuringGetInitialImage() throws Exception {
    assumeTrue(supportsReplication());
    final String name = this.getUniqueName();
    final byte[][] values = new byte[NB1_NUM_ENTRIES][];
    for (int i = 0; i < NB1_NUM_ENTRIES; i++) {
        values[i] = new byte[NB1_VALUE_SIZE];
        Arrays.fill(values[i], (byte) 0x42);
    }
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm2 = host.getVM(2);
    vm0.invoke(new CacheSerializableRunnable("Create Nonmirrored Region") {

        @Override
        public void run2() throws CacheException {
            {
                // root region must be DACK because it's used to sync up async subregions
                AttributesFactory factory = new AttributesFactory();
                factory.setScope(Scope.DISTRIBUTED_ACK);
                factory.setDataPolicy(DataPolicy.EMPTY);
                createRootRegion(factory.create());
            }
            {
                AttributesFactory factory = new AttributesFactory(getRegionAttributes());
                createRegion(name, factory.create());
            }
            // reset slow
            org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 0;
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Put initial data") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            for (int i = 0; i < NB1_NUM_ENTRIES; i++) {
                region.put(new Integer(i), values[i]);
            }
            assertEquals(NB1_NUM_ENTRIES, region.keySet().size());
        }
    });
    // attachDebugger(vm0, "vm0");
    // attachDebugger(vm2, "vm2");
    // start asynchronous process that does updates to the data
    AsyncInvocation async = vm0.invokeAsync(new CacheSerializableRunnable("Do Nonblocking Operations") {

        @Override
        public void run2() throws CacheException {
            // give the gii guy a chance to start
            Wait.pause(200);
            Region region = getRootRegion().getSubregion(name);
            // wait for profile of getInitialImage cache to show up
            final org.apache.geode.internal.cache.CacheDistributionAdvisor adv = ((org.apache.geode.internal.cache.DistributedRegion) region).getCacheDistributionAdvisor();
            // int numProfiles;
            final int expectedProfiles = 1;
            WaitCriterion ev = new WaitCriterion() {

                @Override
                public boolean done() {
                    return expectedProfiles == adv.adviseReplicates().size();
                }

                @Override
                public String description() {
                    return "profile count never became exactly " + expectedProfiles;
                }
            };
            Wait.waitForCriterion(ev, 60 * 1000, 200, true);
            // operate on the odd-numbered keys (update, invalidate, destroy) before the get initial image is complete.
            for (int i = 1; i < 301; i += 2) {
                // getLogWriter().info("doing nonblocking op #"+i);
                Object key = new Integer(i);
                switch(i % 6) {
                    case 1: // UPDATE
                        // use the current timestamp so we know when it happened
                        // we could have used last modification timestamps, but
                        // this works without enabling statistics
                        Object value = new Long(System.currentTimeMillis());
                        region.put(key, value);
                        break;
                    case 3: // INVALIDATE
                        region.invalidate(key);
                        if (region.getAttributes().getScope().isDistributedAck()) {
                            // do a nonblocking netSearch
                            value = region.get(key);
                            assertNull("Expected null value for key: " + i + " but got " + value, value);
                        }
                        break;
                    case 5: // DESTROY
                        region.destroy(key);
                        if (region.getAttributes().getScope().isDistributedAck()) {
                            // do a nonblocking netSearch
                            assertNull(region.get(key));
                        }
                        break;
                    default:
                        fail("unexpected modulus result: " + i);
                        break;
                }
            }
            // at magical number 301, do a region destruction
            // getLogWriter().info("doing destroyRegion");
            region.destroyRegion();
            // getLogWriter().info("finished destroyRegion");
            // now do a put on our DACK root region, which will not complete
            // until processed on the other side, which means everything done before this
            // point has been processed
            {
                Region rr = getRootRegion();
                if (rr != null) {
                    rr.put("DONE", "FLUSH_OPS");
                }
            }
        }
    });
    IgnoredException ex = IgnoredException.addIgnoredException("RegionDestroyedException");
    try {
        // in the meantime, do the get initial image in vm2
        AsyncInvocation asyncGII = vm2.invokeAsync(new CacheSerializableRunnable("Create Mirrored Region") {

            @Override
            public void run2() throws CacheException {
                if (!getRegionAttributes().getScope().isGlobal()) {
                    int pause = 200;
                    org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = pause;
                }
                beginCacheXml();
                {
                    // root region must be DACK because it's used to sync up async subregions
                    AttributesFactory factory = new AttributesFactory();
                    factory.setScope(Scope.DISTRIBUTED_ACK);
                    factory.setDataPolicy(DataPolicy.NORMAL);
                    factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
                    createRootRegion(factory.create());
                }
                {
                    RegionAttributes ra = getRegionAttributes();
                    AttributesFactory factory = new AttributesFactory(ra);
                    if (ra.getDataPolicy().withPersistence()) {
                        factory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
                    } else {
                        factory.setDataPolicy(DataPolicy.REPLICATE);
                    }
                    createRegion(name, factory.create());
                }
                finishCacheXml(name);
                // reset slow
                org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 0;
                // if global scope, the region doesn't get destroyed until after region creation
                try {
                    Thread.sleep(3000);
                } catch (InterruptedException ie) {
                    fail("interrupted");
                }
                assertTrue(getRootRegion().getSubregion(name) == null || getRegionAttributes().getScope().isGlobal());
            }
        });
        if (getRegionAttributes().getScope().isGlobal()) {
            // wait for nonblocking operations to complete
            ThreadUtils.join(async, 30 * 1000);
            if (async.exceptionOccurred()) {
                fail("async invocation failed", async.getException());
            }
            vm2.invoke(new SerializableRunnable("Set fast image processing") {

                @Override
                public void run() {
                    org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 0;
                }
            });
            org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("after async nonblocking ops complete");
        }
        // wait for GII to complete
        // getLogWriter().info("starting wait for GetInitialImage Completion");
        ThreadUtils.join(asyncGII, 30 * 1000);
        org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("Complete GetInitialImage at: " + System.currentTimeMillis());
        if (getRegionAttributes().getScope().isGlobal()) {
            // wait for nonblocking operations to complete
            ThreadUtils.join(async, 30 * 1000);
        }
        if (async.exceptionOccurred()) {
            fail("async failed", async.getException());
        }
        if (asyncGII.exceptionOccurred()) {
            fail("asyncGII failed", asyncGII.getException());
        }
    } finally {
        ex.remove();
    }
}
Also used : CacheException(org.apache.geode.cache.CacheException) RegionAttributes(org.apache.geode.cache.RegionAttributes) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) AttributesFactory(org.apache.geode.cache.AttributesFactory) SubscriptionAttributes(org.apache.geode.cache.SubscriptionAttributes) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) Host(org.apache.geode.test.dunit.Host) WaitCriterion(org.apache.geode.test.dunit.WaitCriterion) VM(org.apache.geode.test.dunit.VM) LocalRegion(org.apache.geode.internal.cache.LocalRegion) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) IgnoredException(org.apache.geode.test.dunit.IgnoredException) StoredObject(org.apache.geode.internal.offheap.StoredObject) Test(org.junit.Test) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest)

Example 9 with SubscriptionAttributes

use of org.apache.geode.cache.SubscriptionAttributes in project geode by apache.

the class MultiVMRegionTestCase method testEntryTtlDestroyEvent.

/**
   * Tests that an entry in a distributed region that expires with a distributed destroy causes an
   * event in the other VM with the isExpiration flag set.
   */
// GEODE-583: time sensitive, expiration, waitForCriterion, short timeouts
@Category(FlakyTest.class)
@Test
public void testEntryTtlDestroyEvent() throws Exception {
    assumeTrue(getRegionAttributes().getPartitionAttributes() == null);
    final String name = this.getUniqueName();
    final int timeout = 22; // ms
    final Object key = "KEY";
    final Object value = "VALUE";
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    class DestroyListener extends TestCacheListener {

        boolean eventIsExpiration = false;

        @Override
        public void afterDestroyBeforeAddEvent(EntryEvent event) {
            eventIsExpiration = event.getOperation().isExpiration();
        }

        @Override
        public void afterDestroy2(EntryEvent event) {
            if (event.isOriginRemote()) {
                assertTrue(!event.getDistributedMember().equals(getSystem().getDistributedMember()));
            } else {
                assertEquals(getSystem().getDistributedMember(), event.getDistributedMember());
            }
            assertEquals(Operation.EXPIRE_DESTROY, event.getOperation());
            assertEquals(value, event.getOldValue());
            eventIsExpiration = event.getOperation().isExpiration();
        }

        @Override
        public void afterCreate2(EntryEvent event) {
        // ignore
        }

        @Override
        public void afterUpdate2(EntryEvent event) {
        // ignore
        }
    }
    SerializableRunnable createRegion = new CacheSerializableRunnable("Create with Listener") {

        @Override
        public void run2() throws CacheException {
            AttributesFactory fac = new AttributesFactory(getRegionAttributes());
            fac.addCacheListener(destroyListener = new DestroyListener());
            createRegion(name, fac.create());
        }
    };
    vm1.invoke(createRegion);
    vm0.invoke(new CacheSerializableRunnable("Create with TTL") {

        @Override
        public void run2() throws CacheException {
            AttributesFactory factory = new AttributesFactory(getRegionAttributes());
            factory.setStatisticsEnabled(true);
            ExpirationAttributes expire = new ExpirationAttributes(timeout, ExpirationAction.DESTROY);
            factory.setEntryTimeToLive(expire);
            if (!getRegionAttributes().getDataPolicy().withReplication()) {
                factory.setDataPolicy(DataPolicy.NORMAL);
                factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
            }
            System.setProperty(LocalRegion.EXPIRY_MS_PROPERTY, "true");
            try {
                createRegion(name, factory.create());
                // suspend to make sure we can see that the put is distributed to this member
                ExpiryTask.suspendExpiration();
            } finally {
                System.getProperties().remove(LocalRegion.EXPIRY_MS_PROPERTY);
            }
        }
    });
    try {
        // let region create finish before doing put
        // pause(10);
        vm1.invoke(new SerializableCallable() {

            @Override
            public Object call() throws Exception {
                Region region = getRootRegion().getSubregion(name);
                DestroyListener dl = (DestroyListener) region.getAttributes().getCacheListeners()[0];
                dl.enableEventHistory();
                region.put(key, value);
                // reset listener after create event
                assertTrue(dl.wasInvoked());
                List<CacheEvent> history = dl.getEventHistory();
                CacheEvent ce = history.get(0);
                dl.disableEventHistory();
                assertEquals(Operation.CREATE, ce.getOperation());
                return null;
            }
        });
        vm0.invoke(new CacheSerializableRunnable("Check create received from vm1") {

            @Override
            public void run2() throws CacheException {
                final Region region = getRootRegion().getSubregion(name);
                WaitCriterion waitForCreate = new WaitCriterion() {

                    @Override
                    public boolean done() {
                        return region.getEntry(key) != null;
                    }

                    @Override
                    public String description() {
                        return "never saw create of " + key;
                    }
                };
                Wait.waitForCriterion(waitForCreate, 3000, 10, true);
            }
        });
    } finally {
        vm0.invoke(new CacheSerializableRunnable("resume expiration") {

            @Override
            public void run2() throws CacheException {
                ExpiryTask.permitExpiration();
            }
        });
    }
    // now wait for it to expire
    vm0.invoke(new CacheSerializableRunnable("Check local destroy") {

        @Override
        public void run2() throws CacheException {
            final Region region = getRootRegion().getSubregion(name);
            WaitCriterion waitForExpire = new WaitCriterion() {

                @Override
                public boolean done() {
                    return region.getEntry(key) == null;
                }

                @Override
                public String description() {
                    return "never saw expire of " + key + " entry=" + region.getEntry(key);
                }
            };
            Wait.waitForCriterion(waitForExpire, 4000, 10, true);
        }
    });
    vm1.invoke(new CacheSerializableRunnable("Verify destroyed and event") {

        @Override
        public void run2() throws CacheException {
            final Region region = getRootRegion().getSubregion(name);
            WaitCriterion waitForExpire = new WaitCriterion() {

                @Override
                public boolean done() {
                    return region.getEntry(key) == null;
                }

                @Override
                public String description() {
                    return "never saw expire of " + key + " entry=" + region.getEntry(key);
                }
            };
            Wait.waitForCriterion(waitForExpire, 4000, 10, true);
            assertTrue(destroyListener.waitForInvocation(555));
            assertTrue(((DestroyListener) destroyListener).eventIsExpiration);
        }
    });
}
Also used : CacheException(org.apache.geode.cache.CacheException) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) Host(org.apache.geode.test.dunit.Host) TimeoutException(org.apache.geode.cache.TimeoutException) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) InvalidDeltaException(org.apache.geode.InvalidDeltaException) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) EntryExistsException(org.apache.geode.cache.EntryExistsException) CacheWriterException(org.apache.geode.cache.CacheWriterException) IgnoredException(org.apache.geode.test.dunit.IgnoredException) CacheLoaderException(org.apache.geode.cache.CacheLoaderException) AttributesFactory(org.apache.geode.cache.AttributesFactory) WaitCriterion(org.apache.geode.test.dunit.WaitCriterion) VM(org.apache.geode.test.dunit.VM) EntryEvent(org.apache.geode.cache.EntryEvent) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) CacheEvent(org.apache.geode.cache.CacheEvent) LocalRegion(org.apache.geode.internal.cache.LocalRegion) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) StoredObject(org.apache.geode.internal.offheap.StoredObject) ArrayList(java.util.ArrayList) List(java.util.List) LinkedList(java.util.LinkedList) ExpirationAttributes(org.apache.geode.cache.ExpirationAttributes) SubscriptionAttributes(org.apache.geode.cache.SubscriptionAttributes) Category(org.junit.experimental.categories.Category) Test(org.junit.Test) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest)
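
A hedged recap of the attribute combination the expiring VM builds above, with the reasoning spelled out: statistics must be enabled for entry expiration to run, and when the region is not replicated it gets DataPolicy.NORMAL with InterestPolicy.ALL so the put performed in the other VM actually reaches this member before the TTL destroys it. The class name and the println are illustrative; 22 mirrors the test's TTL value and would normally be interpreted as seconds without the test-only EXPIRY_MS_PROPERTY hook.

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.DataPolicy;
import org.apache.geode.cache.ExpirationAction;
import org.apache.geode.cache.ExpirationAttributes;
import org.apache.geode.cache.InterestPolicy;
import org.apache.geode.cache.SubscriptionAttributes;

public class ExpiringSubscriberSketch {
    public static void main(String[] args) {
        AttributesFactory factory = new AttributesFactory();
        factory.setStatisticsEnabled(true); // entry TTL requires statistics
        factory.setEntryTimeToLive(new ExpirationAttributes(22, ExpirationAction.DESTROY));
        // Not replicated, so subscribe to all events to receive the remote create.
        factory.setDataPolicy(DataPolicy.NORMAL);
        factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
        System.out.println(factory.create().getEntryTimeToLive().getAction());
    }
}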

Example 10 with SubscriptionAttributes

use of org.apache.geode.cache.SubscriptionAttributes in project geode by apache.

the class LRUEvictionControllerDUnitTest method testCCMirrored.

/**
   * Tests that a capacity controller with a LOCAL_DESTROY eviction action cannot be installed into a
   * mirrored (replicated) region; the region's data policy is downgraded to PRELOADED instead.
   */
@Test
public void testCCMirrored() throws Exception {
    final String name = this.getUniqueName();
    AttributesFactory factory = new AttributesFactory();
    factory.setOffHeap(isOffHeapEnabled());
    factory.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(10));
    factory.setDataPolicy(DataPolicy.REPLICATE);
    Region r = createRegion(name, factory.create());
    RegionAttributes ra = r.getAttributes();
    assertEquals(DataPolicy.PRELOADED, ra.getDataPolicy());
    assertEquals(new SubscriptionAttributes(InterestPolicy.ALL), ra.getSubscriptionAttributes());
    r.destroyRegion();
}
Also used : AttributesFactory(org.apache.geode.cache.AttributesFactory) RegionAttributes(org.apache.geode.cache.RegionAttributes) LocalRegion(org.apache.geode.internal.cache.LocalRegion) Region(org.apache.geode.cache.Region) SubscriptionAttributes(org.apache.geode.cache.SubscriptionAttributes) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
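
A follow-up note on the assertion above: the region was requested with DataPolicy.REPLICATE, but an LRU controller whose eviction action is LOCAL_DESTROY cannot preserve full replication, so the region comes back as PRELOADED with an ALL interest policy. The sketch below contrasts this with an overflow action, which is assumed to keep the replicate policy since entries are written to disk rather than destroyed locally; the class name and the overflow variant are illustrative, not taken from the test.

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.DataPolicy;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;

public class EvictionActionSketch {
    public static void main(String[] args) {
        // LOCAL_DESTROY eviction on a replicate: a region created from these attributes
        // is expected to come back as PRELOADED + InterestPolicy.ALL, as asserted above.
        AttributesFactory localDestroy = new AttributesFactory();
        localDestroy.setDataPolicy(DataPolicy.REPLICATE);
        localDestroy.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(10));

        // Overflow eviction: assumed to keep DataPolicy.REPLICATE and the default
        // SubscriptionAttributes, since nothing is removed from the local copy.
        AttributesFactory overflow = new AttributesFactory();
        overflow.setDataPolicy(DataPolicy.REPLICATE);
        overflow.setEvictionAttributes(
            EvictionAttributes.createLRUEntryAttributes(10, EvictionAction.OVERFLOW_TO_DISK));

        System.out.println(localDestroy.create().getEvictionAttributes().getAction());
        System.out.println(overflow.create().getEvictionAttributes().getAction());
    }
}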

Aggregations

SubscriptionAttributes (org.apache.geode.cache.SubscriptionAttributes)35 AttributesFactory (org.apache.geode.cache.AttributesFactory)24 Region (org.apache.geode.cache.Region)16 Test (org.junit.Test)15 CacheException (org.apache.geode.cache.CacheException)13 SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable)12 VM (org.apache.geode.test.dunit.VM)11 EntryEvent (org.apache.geode.cache.EntryEvent)10 Properties (java.util.Properties)9 LocalRegion (org.apache.geode.internal.cache.LocalRegion)9 PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory)8 ConfigurationProperties (org.apache.geode.distributed.ConfigurationProperties)7 PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion)7 Host (org.apache.geode.test.dunit.Host)7 WaitCriterion (org.apache.geode.test.dunit.WaitCriterion)7 FlakyTest (org.apache.geode.test.junit.categories.FlakyTest)7 RegionAttributes (org.apache.geode.cache.RegionAttributes)6 CacheListenerAdapter (org.apache.geode.cache.util.CacheListenerAdapter)6 StoredObject (org.apache.geode.internal.offheap.StoredObject)6 DistributedTest (org.apache.geode.test.junit.categories.DistributedTest)6