Search in sources :

Example 11 with CacheWriterException

use of org.apache.geode.cache.CacheWriterException in project geode by apache.

From the class PdxAttributesJUnitTest, the method testDuplicatePdxTypeId:

@Test
public void testDuplicatePdxTypeId() throws Exception {
    int distributedSystemId = 5;
    CacheFactory cacheFactory = new CacheFactory();
    cacheFactory.set(MCAST_PORT, "0");
    cacheFactory.set(ConfigurationProperties.DISTRIBUTED_SYSTEM_ID,
        String.valueOf(distributedSystemId));
    Cache cache = cacheFactory.create();
    // Register one PDX type so the type-registry region has entries to collide with.
    defineAType();
    Region pdxRegion = cache.getRegion(PeerTypeRegistration.REGION_NAME);
    boolean foundException = false;
    boolean foundEnumException = false;
    // Re-put each registry entry under its already-registered id; the registry's
    // writer is expected to veto the duplicate id with a CacheWriterException.
    // Integer keys identify PdxType registrations, EnumId keys identify enums.
    for (Object element : pdxRegion.entrySet()) {
        Map.Entry ent = (Map.Entry) element;
        Object entryKey = ent.getKey();
        if (entryKey instanceof Integer) {
            int pdxTypeId = (int) entryKey;
            try {
                pdxRegion.put(pdxTypeId, new PdxType());
            } catch (CacheWriterException cwe) {
                foundException = true;
            }
        } else {
            EnumId enumId = (EnumId) entryKey;
            EnumInfo enumInfo = new EnumInfo(SimpleEnum.ONE);
            try {
                pdxRegion.put(enumId, enumInfo);
            } catch (CacheWriterException cwe) {
                foundEnumException = true;
            }
        }
    }
    assertEquals(true, foundException);
    assertEquals(true, foundEnumException);
    cache.close();
}
Also used : PdxType(org.apache.geode.pdx.internal.PdxType) EnumInfo(org.apache.geode.pdx.internal.EnumInfo) Iterator(java.util.Iterator) Region(org.apache.geode.cache.Region) CacheFactory(org.apache.geode.cache.CacheFactory) Map(java.util.Map) Cache(org.apache.geode.cache.Cache) CacheWriterException(org.apache.geode.cache.CacheWriterException) EnumId(org.apache.geode.pdx.internal.EnumId) Test(org.junit.Test) SerializationTest(org.apache.geode.test.junit.categories.SerializationTest) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)

Example 12 with CacheWriterException

use of org.apache.geode.cache.CacheWriterException in project geode by apache.

From the class MultiVMRegionTestCase, the method testRemoteCacheWriter:

/**
   * Tests that a remote {@link CacheWriter} is invoked and that <code>CacheWriter</code> arguments
   * and {@link CacheWriterException}s are propagated appropriately.
   */
@Test
public void testRemoteCacheWriter() throws Exception {
    // A remote CacheWriter only fires for distributed scopes; guard the precondition.
    assertTrue(getRegionAttributes().getScope().isDistributed());
    final String name = this.getUniqueName();
    final Object key = "KEY";
    final Object oldValue = "OLD_VALUE";
    final Object newValue = "NEW_VALUE";
    // Callback argument that the writer asserts on for the "happy path" operations.
    final Object arg = "ARG";
    // Callback argument that tells the writer to throw a CacheWriterException.
    final Object exception = "EXCEPTION";
    final Object key2 = "KEY2";
    final Object value2 = "VALUE2";
    SerializableRunnable create = new CacheSerializableRunnable("Create Region") {

        @Override
        public void run2() throws CacheException {
            Region region = createRegion(name);
            // Put key2 in the region before any callbacks are
            // registered, so it can be destroyed later
            region.put(key2, value2);
            assertEquals(1, region.size());
            // For off-heap regions, also verify the stored value's off-heap
            // ref count and the allocator's object count.
            if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                LocalRegion reRegion;
                reRegion = (LocalRegion) region;
                RegionEntry re = reRegion.getRegionEntry(key2);
                StoredObject so = (StoredObject) re._getValue();
                assertEquals(1, so.getRefCount());
                assertEquals(1, ma.getStats().getObjects());
            }
        }
    };
    // vm0 performs the operations; vm1 hosts the remote writer that is invoked.
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    vm0.invoke(create);
    vm1.invoke(create);
    //////// Create
    vm1.invoke(new CacheSerializableRunnable("Set Writer") {

        @Override
        public void run2() throws CacheException {
            final Region region = getRootRegion().getSubregion(name);
            writer = new TestCacheWriter() {

                @Override
                public void beforeCreate2(EntryEvent event) throws CacheWriterException {
                    // Veto the create when the "EXCEPTION" callback argument is used.
                    if (exception.equals(event.getCallbackArgument())) {
                        String s = "Test Exception";
                        throw new CacheWriterException(s);
                    }
                    assertEquals(region, event.getRegion());
                    assertTrue(event.getOperation().isCreate());
                    assertTrue(event.getOperation().isDistributed());
                    assertFalse(event.getOperation().isExpiration());
                    // The put originates in vm0, so from vm1's writer it is remote.
                    assertTrue(event.isOriginRemote());
                    assertEquals(key, event.getKey());
                    assertEquals(null, event.getOldValue());
                    assertEquals(oldValue, event.getNewValue());
                    assertFalse(event.getOperation().isLoad());
                    assertFalse(event.getOperation().isLocalLoad());
                    assertFalse(event.getOperation().isNetLoad());
                    assertFalse(event.getOperation().isNetSearch());
                }
            };
            region.getAttributesMutator().setCacheWriter(writer);
            flushIfNecessary(region);
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Create with Exception") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            try {
                region.put(key, oldValue, exception);
                fail("Should have thrown a CacheWriterException");
            } catch (CacheWriterException ex) {
                // The vetoed create must leave no entry behind in vm0.
                assertNull(region.getEntry(key));
                assertEquals(1, region.size());
                if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                    GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                    MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                    assertEquals(1, ma.getStats().getObjects());
                }
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Create with Argument") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            // With the "ARG" callback argument the writer allows the create.
            region.put(key, oldValue, arg);
            assertEquals(2, region.size());
            if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                assertEquals(2, ma.getStats().getObjects());
                LocalRegion reRegion;
                reRegion = (LocalRegion) region;
                StoredObject so = (StoredObject) reRegion.getRegionEntry(key)._getValue();
                assertEquals(1, so.getRefCount());
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    //////// Update
    vm1.invoke(new CacheSerializableRunnable("Set Writer") {

        @Override
        public void run2() throws CacheException {
            final Region region = getRootRegion().getSubregion(name);
            writer = new TestCacheWriter() {

                @Override
                public void beforeUpdate2(EntryEvent event) throws CacheWriterException {
                    Object argument = event.getCallbackArgument();
                    if (exception.equals(argument)) {
                        String s = "Test Exception";
                        throw new CacheWriterException(s);
                    }
                    assertEquals(arg, argument);
                    assertEquals(region, event.getRegion());
                    assertTrue(event.getOperation().isUpdate());
                    assertTrue(event.getOperation().isDistributed());
                    assertFalse(event.getOperation().isExpiration());
                    assertTrue(event.isOriginRemote());
                    assertEquals(key, event.getKey());
                    assertEquals(oldValue, event.getOldValue());
                    assertEquals(newValue, event.getNewValue());
                    assertFalse(event.getOperation().isLoad());
                    assertFalse(event.getOperation().isLocalLoad());
                    assertFalse(event.getOperation().isNetLoad());
                    assertFalse(event.getOperation().isNetSearch());
                }
            };
            region.getAttributesMutator().setCacheWriter(writer);
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Update with Exception") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            try {
                region.put(key, newValue, exception);
                fail("Should have thrown a CacheWriterException");
            } catch (CacheWriterException ex) {
                // The vetoed update must leave the previous value in place.
                Region.Entry entry = region.getEntry(key);
                assertEquals(oldValue, entry.getValue());
                assertEquals(2, region.size());
                if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                    GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                    MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                    assertEquals(2, ma.getStats().getObjects());
                    LocalRegion reRegion;
                    reRegion = (LocalRegion) region;
                    StoredObject so = (StoredObject) reRegion.getRegionEntry(key)._getValue();
                    assertEquals(1, so.getRefCount());
                }
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Update with Argument") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            region.put(key, newValue, arg);
            assertEquals(2, region.size());
            if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                assertEquals(2, ma.getStats().getObjects());
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    //////// Destroy
    vm1.invoke(new CacheSerializableRunnable("Set Writer") {

        @Override
        public void run2() throws CacheException {
            final Region region = getRootRegion().getSubregion(name);
            writer = new TestCacheWriter() {

                @Override
                public void beforeDestroy2(EntryEvent event) throws CacheWriterException {
                    Object argument = event.getCallbackArgument();
                    if (exception.equals(argument)) {
                        String s = "Test Exception";
                        throw new CacheWriterException(s);
                    }
                    assertEquals(arg, argument);
                    assertEquals(region, event.getRegion());
                    assertTrue(event.getOperation().isDestroy());
                    assertTrue(event.getOperation().isDistributed());
                    assertFalse(event.getOperation().isExpiration());
                    assertTrue(event.isOriginRemote());
                    assertEquals(key, event.getKey());
                    assertEquals(newValue, event.getOldValue());
                    assertNull(event.getNewValue());
                    assertFalse(event.getOperation().isLoad());
                    assertFalse(event.getOperation().isLocalLoad());
                    assertFalse(event.getOperation().isNetLoad());
                    assertFalse(event.getOperation().isNetSearch());
                }
            };
            region.getAttributesMutator().setCacheWriter(writer);
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Destroy with Exception") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            try {
                region.destroy(key, exception);
                fail("Should have thrown a CacheWriterException");
            } catch (CacheWriterException ex) {
                // The vetoed destroy must leave the entry intact.
                assertNotNull(region.getEntry(key));
                assertEquals(2, region.size());
                if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                    GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                    MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                    assertEquals(2, ma.getStats().getObjects());
                }
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Destroy with Argument") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            region.destroy(key, arg);
            assertEquals(1, region.size());
            if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                assertEquals(1, ma.getStats().getObjects());
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    //////// Region Destroy
    vm1.invoke(new CacheSerializableRunnable("Set Writer") {

        @Override
        public void run2() throws CacheException {
            final Region region = getRootRegion().getSubregion(name);
            writer = new TestCacheWriter() {

                @Override
                public void beforeRegionDestroy2(RegionEvent event) throws CacheWriterException {
                    Object argument = event.getCallbackArgument();
                    if (exception.equals(argument)) {
                        String s = "Test Exception";
                        throw new CacheWriterException(s);
                    }
                    assertEquals(arg, argument);
                    assertEquals(region, event.getRegion());
                    assertTrue(event.getOperation().isRegionDestroy());
                    assertTrue(event.getOperation().isDistributed());
                    assertFalse(event.getOperation().isExpiration());
                    assertTrue(event.isOriginRemote());
                }
            };
            region.getAttributesMutator().setCacheWriter(writer);
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Destroy with Exception") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            try {
                region.destroyRegion(exception);
                fail("Should have thrown a CacheWriterException");
            } catch (CacheWriterException ex) {
                // If the region is already gone the veto arrived too late — fail loudly.
                if (region.isDestroyed()) {
                    fail("should not have an exception if region is destroyed", ex);
                }
                assertEquals(1, region.size());
                if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                    GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                    MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                    assertEquals(1, ma.getStats().getObjects());
                }
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Destroy with Argument") {

        @Override
        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            assertEquals(1, region.size());
            if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                assertEquals(1, ma.getStats().getObjects());
            }
            region.destroyRegion(arg);
            // Off-heap memory is reclaimed asynchronously after region destroy,
            // so poll for the object count to reach zero instead of asserting it.
            if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
                GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
                final MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
                WaitCriterion waitForStatChange = new WaitCriterion() {

                    @Override
                    public boolean done() {
                        return ma.getStats().getObjects() == 0;
                    }

                    @Override
                    public String description() {
                        return "never saw off-heap object count go to zero. Last value was " + ma.getStats().getObjects();
                    }
                };
                Wait.waitForCriterion(waitForStatChange, 3000, 10, true);
            }
        }
    });
    vm1.invoke(new SerializableRunnable("Verify callback") {

        @Override
        public void run() {
            assertTrue(writer.wasInvoked());
        }
    });
}
Also used : CacheException(org.apache.geode.cache.CacheException) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) MemoryAllocatorImpl(org.apache.geode.internal.offheap.MemoryAllocatorImpl) Host(org.apache.geode.test.dunit.Host) LocalRegion(org.apache.geode.internal.cache.LocalRegion) RegionEvent(org.apache.geode.cache.RegionEvent) WaitCriterion(org.apache.geode.test.dunit.WaitCriterion) StoredObject(org.apache.geode.internal.offheap.StoredObject) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) VM(org.apache.geode.test.dunit.VM) EntryEvent(org.apache.geode.cache.EntryEvent) LocalRegion(org.apache.geode.internal.cache.LocalRegion) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) RegionEntry(org.apache.geode.internal.cache.RegionEntry) StoredObject(org.apache.geode.internal.offheap.StoredObject) CacheWriterException(org.apache.geode.cache.CacheWriterException) Test(org.junit.Test) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest)

Example 13 with CacheWriterException

use of org.apache.geode.cache.CacheWriterException in project geode by apache.

From the class MapInterfaceJUnitTest, the method testBeforeRegionClearCallBack:

/**
 * Verifies that {@code beforeRegionClear} fires on a LOCAL-scope region when a
 * clear runs on another thread, and that {@code localClear} is rejected with an
 * {@link UnsupportedOperationException} on a replicated region.
 */
@Test
public void testBeforeRegionClearCallBack() {
    Properties props = new Properties();
    props.setProperty(MCAST_PORT, "0");
    props.setProperty(LOCATORS, "");
    DistributedSystem ds = DistributedSystem.connect(props);
    Cache cache = null;
    Region region = null;
    AttributesFactory factory = null;
    try {
        cache = CacheFactory.create(ds);
        factory = new AttributesFactory();
        factory.setScope(Scope.LOCAL);
        factory.setCacheWriter(new CacheWriterAdapter() {

            @Override
            public void beforeRegionClear(RegionEvent event) throws CacheWriterException {
                // Synchronize and notify on the OUTER test instance: the test thread
                // below waits on MapInterfaceJUnitTest.this, so using the anonymous
                // adapter's own monitor ("this") would never wake the waiter and the
                // test would only pass via the wait(3000) timeout.
                synchronized (MapInterfaceJUnitTest.this) {
                    MapInterfaceJUnitTest.this.hasBeenNotified = true;
                    MapInterfaceJUnitTest.this.notify();
                }
            }
        });
        region = cache.createRegion("testingRegion", factory.create());
        DoesClear doesClear = new DoesClear(region);
        new Thread(doesClear).start();
        // Wait (bounded at 3s) for the writer callback on the clear thread.
        synchronized (this) {
            if (!this.hasBeenNotified) {
                this.wait(3000);
            }
        }
        if (!this.hasBeenNotified) {
            fail(" beforeRegionClear call back did not come");
        }
    } catch (Exception e) {
        throw new AssertionError(" failed due to ", e);
    }
    // Populate and locally clear a LOCAL-scope region: clear must succeed.
    for (int i = 0; i < 100; i++) {
        region.put(Integer.valueOf(i), Integer.valueOf(i));
    }
    assertEquals(Integer.valueOf(50), region.get(Integer.valueOf(50)));
    region.localClear();
    assertEquals(null, region.get(Integer.valueOf(50)));
    region.close();
    // Recreate the region as a replicate: localClear is unsupported there.
    factory.setScope(Scope.DISTRIBUTED_ACK);
    factory.setDataPolicy(DataPolicy.REPLICATE);
    try {
        region = cache.createRegion("testingRegion", factory.create());
    } catch (Exception e) {
        throw new AssertionError(" failed in creating region due to ", e);
    }
    boolean exceptionOccurred = false;
    try {
        region.localClear();
    } catch (UnsupportedOperationException e) {
        exceptionOccurred = true;
    }
    if (!exceptionOccurred) {
        fail(" exception did not occur when it was supposed to occur");
    }
    region.close();
    cache.close();
    ds.disconnect();
}
Also used : CacheWriterAdapter(org.apache.geode.cache.util.CacheWriterAdapter) ConfigurationProperties(org.apache.geode.distributed.ConfigurationProperties) Properties(java.util.Properties) DistributedSystem(org.apache.geode.distributed.DistributedSystem) RegionEvent(org.apache.geode.cache.RegionEvent) CacheWriterException(org.apache.geode.cache.CacheWriterException) AttributesFactory(org.apache.geode.cache.AttributesFactory) Region(org.apache.geode.cache.Region) Cache(org.apache.geode.cache.Cache) CacheWriterException(org.apache.geode.cache.CacheWriterException) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)

Example 14 with CacheWriterException

use of org.apache.geode.cache.CacheWriterException in project geode by apache.

From the class EventIDVerificationDUnitTest, the method createClientCache:

/**
 * Creates a client cache connected to the two given server ports and installs a
 * CacheWriter whose callbacks forward each event's EventID to vm0 and vm1 so
 * those VMs can later verify that the same id was propagated server-side.
 *
 * @param host server host name
 * @param port1 first server port
 * @param port2 second server port
 */
public static void createClientCache(String host, Integer port1, Integer port2) throws Exception {
    PORT1 = port1.intValue();
    PORT2 = port2.intValue();
    Properties props = new Properties();
    props.setProperty(MCAST_PORT, "0");
    props.setProperty(LOCATORS, "");
    new EventIDVerificationDUnitTest().createCache(props);
    AttributesFactory factory = new AttributesFactory();
    factory.setScope(Scope.DISTRIBUTED_ACK);
    factory.setMirrorType(MirrorType.NONE);
    ClientServerTestCase.configureConnectionPool(factory, host, new int[] { PORT1, PORT2 }, true, -1, 2, null, -1, -1, false, -2);
    // Each callback records the event's id in both VMs, then delegates to the
    // adapter's (no-op) super implementation, turning any CacheWriterException
    // from the super call into a test failure.
    CacheWriter writer = new CacheWriterAdapter() {

        @Override
        public void beforeCreate(EntryEvent event) {
            setEventIDDataInVMs(((EntryEventImpl) event).getEventId());
            try {
                super.beforeCreate(event);
            } catch (CacheWriterException e) {
                e.printStackTrace();
                fail("Test failed bcoz of exception =" + e);
            }
        }

        @Override
        public void beforeUpdate(EntryEvent event) {
            setEventIDDataInVMs(((EntryEventImpl) event).getEventId());
            try {
                super.beforeUpdate(event);
            } catch (CacheWriterException e) {
                e.printStackTrace();
                fail("Test failed bcoz of exception =" + e);
            }
        }

        @Override
        public void beforeDestroy(EntryEvent event) {
            setEventIDDataInVMs(((EntryEventImpl) event).getEventId());
            try {
                super.beforeDestroy(event);
            } catch (CacheWriterException e) {
                e.printStackTrace();
                fail("Test failed bcoz of exception =" + e);
            }
        }

        @Override
        public void beforeRegionDestroy(RegionEvent event) {
            setEventIDDataInVMs(((RegionEventImpl) event).getEventId());
            try {
                super.beforeRegionDestroy(event);
            } catch (CacheWriterException e) {
                e.printStackTrace();
                fail("Test failed bcoz of exception =" + e);
            }
        }

        @Override
        public void beforeRegionClear(RegionEvent event) {
            setEventIDDataInVMs(((RegionEventImpl) event).getEventId());
            try {
                super.beforeRegionClear(event);
            } catch (CacheWriterException e) {
                e.printStackTrace();
                fail("Test failed bcoz of exception =" + e);
            }
        }
    };
    factory.setCacheWriter(writer);
    /*
     * factory.setCacheListener(new CacheListenerAdapter() { public void afterCreate(EntryEvent
     * event) { synchronized (this) { threadId = ((EntryEventImpl)event).getEventId().getThreadID();
     * membershipId = ((EntryEventImpl)event).getEventId().getMembershipID(); } }
     *
     * public void afterUpdate(EntryEvent event) { synchronized (this) { verifyEventIDs(event); } }
     *
     * public void afterDestroy(EntryEvent event) { synchronized (this) { verifyEventIDs(event); } }
     * public void afterRegionDestroy(RegionEvent event) { synchronized (this) { threadId =
     * ((RegionEventImpl)event).getEventId().getThreadID(); membershipId =
     * ((RegionEventImpl)event).getEventId().getMembershipID(); } } });
     */
    RegionAttributes attrs = factory.create();
    Region r = cache.createRegion(REGION_NAME, attrs);
    r.registerInterest("ALL_KEYS");
}

/**
 * Forwards the given event id to vm0 and vm1 so they can later compare it
 * against the id observed there. Extracted to remove the identical two-line
 * boilerplate repeated in every writer callback above.
 */
private static void setEventIDDataInVMs(final EventID eventId) {
    vm0.invoke(() -> EventIDVerificationDUnitTest.setEventIDData(eventId));
    vm1.invoke(() -> EventIDVerificationDUnitTest.setEventIDData(eventId));
}
Also used : CacheWriterAdapter(org.apache.geode.cache.util.CacheWriterAdapter) EntryEventImpl(org.apache.geode.internal.cache.EntryEventImpl) RegionAttributes(org.apache.geode.cache.RegionAttributes) RegionEventImpl(org.apache.geode.internal.cache.RegionEventImpl) ConfigurationProperties(org.apache.geode.distributed.ConfigurationProperties) Properties(java.util.Properties) RegionEvent(org.apache.geode.cache.RegionEvent) AttributesFactory(org.apache.geode.cache.AttributesFactory) CacheWriter(org.apache.geode.cache.CacheWriter) EntryEvent(org.apache.geode.cache.EntryEvent) EventID(org.apache.geode.internal.cache.EventID) Region(org.apache.geode.cache.Region) CacheWriterException(org.apache.geode.cache.CacheWriterException)

Example 15 with CacheWriterException

use of org.apache.geode.cache.CacheWriterException in project geode by apache.

From the class GemFireCacheImpl, the method shutDownOnePRGracefully:

/**
 * Gracefully shuts down a single {@link PartitionedRegion} as part of the shut-down-all
 * sequence. For a persistent-partition data store this coordinates with the other members in
 * three ordered phases, each advertised via {@code setShutDownAllStatus} +
 * {@code UpdateAttributesProcessor} and then awaited with {@code waitForProfileStatus}:
 * <ol>
 *   <li>lock all local primary buckets and record each bucket's initialized persistent members
 *       (PRIMARY_BUCKETS_LOCKED);</li>
 *   <li>if the region's disk writes are asynchronous, force-flush the disk store
 *       (DISK_STORE_FLUSHED);</li>
 *   <li>persist the still-live members as offline-and-equal for every bucket
 *       (OFFLINE_EQUAL_PERSISTED).</li>
 * </ol>
 * Finally the region is closed locally via {@code basicDestroyRegion}. The whole protocol runs
 * while holding the region's destroy lock and synchronizing on its redundancy provider.
 * Cache-closed / cancel / region-destroyed exceptions during shutdown are expected races and are
 * only logged at debug level.
 *
 * @param partitionedRegion the partitioned region to close; its destroy lock is acquired here
 *        and released in the finally block
 */
private void shutDownOnePRGracefully(PartitionedRegion partitionedRegion) {
    boolean acquiredLock = false;
    try {
        partitionedRegion.acquireDestroyLock();
        acquiredLock = true;
        synchronized (partitionedRegion.getRedundancyProvider()) {
            if (partitionedRegion.isDataStore() && partitionedRegion.getDataStore() != null && partitionedRegion.getDataPolicy() == DataPolicy.PERSISTENT_PARTITION) {
                int numBuckets = partitionedRegion.getTotalNumberOfBuckets();
                // Indexed by bucket id: the initialized persistent members observed for each
                // local bucket while it was lock-protected below. Entries for non-local or
                // destroyed buckets remain null.
                Map<InternalDistributedMember, PersistentMemberID>[] bucketMaps = new Map[numBuckets];
                PartitionedRegionDataStore dataStore = partitionedRegion.getDataStore();
                // lock all the primary buckets
                Set<Entry<Integer, BucketRegion>> bucketEntries = dataStore.getAllLocalBuckets();
                for (Entry e : bucketEntries) {
                    BucketRegion bucket = (BucketRegion) e.getValue();
                    if (bucket == null || bucket.isDestroyed) {
                        // bucket region could be destroyed in race condition
                        continue;
                    }
                    bucket.getBucketAdvisor().tryLockIfPrimary();
                    // get map <InternalDistributedMember, persistentID> for this bucket's
                    // remote members
                    bucketMaps[bucket.getId()] = bucket.getBucketAdvisor().adviseInitializedPersistentMembers();
                    if (logger.isDebugEnabled()) {
                        logger.debug("shutDownAll: PR {}: initialized persistent members for {}:{}", partitionedRegion.getName(), bucket.getId(), bucketMaps[bucket.getId()]);
                    }
                }
                if (logger.isDebugEnabled()) {
                    logger.debug("shutDownAll: All buckets for PR {} are locked.", partitionedRegion.getName());
                }
                // send lock profile update to other members
                partitionedRegion.setShutDownAllStatus(PartitionedRegion.PRIMARY_BUCKETS_LOCKED);
                new UpdateAttributesProcessor(partitionedRegion).distribute(false);
                partitionedRegion.getRegionAdvisor().waitForProfileStatus(PartitionedRegion.PRIMARY_BUCKETS_LOCKED);
                if (logger.isDebugEnabled()) {
                    logger.debug("shutDownAll: PR {}: all bucketLock profiles received.", partitionedRegion.getName());
                }
                // if async write, do flush
                if (!partitionedRegion.getAttributes().isDiskSynchronous()) {
                    // several PRs might share the same diskStore, we will only flush once
                    // even flush is called several times.
                    partitionedRegion.getDiskStore().forceFlush();
                    // send flush profile update to other members
                    partitionedRegion.setShutDownAllStatus(PartitionedRegion.DISK_STORE_FLUSHED);
                    new UpdateAttributesProcessor(partitionedRegion).distribute(false);
                    partitionedRegion.getRegionAdvisor().waitForProfileStatus(PartitionedRegion.DISK_STORE_FLUSHED);
                    if (logger.isDebugEnabled()) {
                        logger.debug("shutDownAll: PR {}: all flush profiles received.", partitionedRegion.getName());
                    }
                }
                // async write
                // persist other members to OFFLINE_EQUAL for each bucket region
                // iterate through all the bucketMaps and exclude the items whose
                // idm is no longer online
                Set<InternalDistributedMember> membersToPersistOfflineEqual = partitionedRegion.getRegionAdvisor().adviseDataStore();
                for (Entry e : bucketEntries) {
                    BucketRegion bucket = (BucketRegion) e.getValue();
                    if (bucket == null || bucket.isDestroyed) {
                        // bucket region could be destroyed in race condition
                        continue;
                    }
                    // Intersect the members recorded in phase 1 with the currently-advised data
                    // stores, so only still-live members are persisted as offline-and-equal.
                    Map<InternalDistributedMember, PersistentMemberID> persistMap = getSubMapForLiveMembers(membersToPersistOfflineEqual, bucketMaps[bucket.getId()]);
                    if (persistMap != null) {
                        bucket.getPersistenceAdvisor().persistMembersOfflineAndEqual(persistMap);
                        if (logger.isDebugEnabled()) {
                            logger.debug("shutDownAll: PR {}: persisting bucket {}:{}", partitionedRegion.getName(), bucket.getId(), persistMap);
                        }
                    }
                }
                // send persisted profile update to other members, let all members to persist
                // before close the region
                partitionedRegion.setShutDownAllStatus(PartitionedRegion.OFFLINE_EQUAL_PERSISTED);
                new UpdateAttributesProcessor(partitionedRegion).distribute(false);
                partitionedRegion.getRegionAdvisor().waitForProfileStatus(PartitionedRegion.OFFLINE_EQUAL_PERSISTED);
                if (logger.isDebugEnabled()) {
                    logger.debug("shutDownAll: PR {}: all offline_equal profiles received.", partitionedRegion.getName());
                }
            }
            // dataStore
            // after done all steps for buckets, close partitionedRegion
            // close accessor directly
            RegionEventImpl event = new RegionEventImpl(partitionedRegion, Operation.REGION_CLOSE, null, false, getMyId(), true);
            try {
                // not to acquire lock
                partitionedRegion.basicDestroyRegion(event, false, false, true);
            } catch (CacheWriterException e) {
                // not possible with local operation, CacheWriter not called
                throw new Error(LocalizedStrings.LocalRegion_CACHEWRITEREXCEPTION_SHOULD_NOT_BE_THROWN_IN_LOCALDESTROYREGION.toLocalizedString(), e);
            } catch (TimeoutException e) {
                // not possible with local operation, no distributed locks possible
                throw new Error(LocalizedStrings.LocalRegion_TIMEOUTEXCEPTION_SHOULD_NOT_BE_THROWN_IN_LOCALDESTROYREGION.toLocalizedString(), e);
            }
        }
    // synchronized
    } catch (CacheClosedException cce) {
        logger.debug("Encounter CacheClosedException when shutDownAll is closing PR: {}:{}", partitionedRegion.getFullPath(), cce.getMessage());
    } catch (CancelException ce) {
        logger.debug("Encounter CancelException when shutDownAll is closing PR: {}:{}", partitionedRegion.getFullPath(), ce.getMessage());
    } catch (RegionDestroyedException rde) {
        // NOTE(review): this handler catches RegionDestroyedException but the log message says
        // "CacheDestroyedException" — looks like a copy/paste slip in the message text.
        logger.debug("Encounter CacheDestroyedException when shutDownAll is closing PR: {}:{}", partitionedRegion.getFullPath(), rde.getMessage());
    } finally {
        // Release the destroy lock only if it was actually acquired (acquireDestroyLock may
        // have thrown before acquiredLock was set).
        if (acquiredLock) {
            partitionedRegion.releaseDestroyLock();
        }
    }
}
Also used : RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) InternalGemFireError(org.apache.geode.InternalGemFireError) CacheClosedException(org.apache.geode.cache.CacheClosedException) SimpleExtensionPoint(org.apache.geode.internal.cache.extension.SimpleExtensionPoint) ExtensionPoint(org.apache.geode.internal.cache.extension.ExtensionPoint) PersistentMemberID(org.apache.geode.internal.cache.persistence.PersistentMemberID) Entry(java.util.Map.Entry) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) CancelException(org.apache.geode.CancelException) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) TreeMap(java.util.TreeMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) SortedMap(java.util.SortedMap) HashMap(java.util.HashMap) CacheWriterException(org.apache.geode.cache.CacheWriterException) TimeoutException(org.apache.geode.cache.TimeoutException)

Aggregations

CacheWriterException (org.apache.geode.cache.CacheWriterException)50 Region (org.apache.geode.cache.Region)22 Test (org.junit.Test)14 EntryEvent (org.apache.geode.cache.EntryEvent)13 Released (org.apache.geode.internal.offheap.annotations.Released)12 AttributesFactory (org.apache.geode.cache.AttributesFactory)11 CacheException (org.apache.geode.cache.CacheException)11 TimeoutException (org.apache.geode.cache.TimeoutException)11 InternalDistributedMember (org.apache.geode.distributed.internal.membership.InternalDistributedMember)10 CacheWriterAdapter (org.apache.geode.cache.util.CacheWriterAdapter)9 DistributedTest (org.apache.geode.test.junit.categories.DistributedTest)9 EntryNotFoundException (org.apache.geode.cache.EntryNotFoundException)8 VM (org.apache.geode.test.dunit.VM)8 Cache (org.apache.geode.cache.Cache)7 RegionEvent (org.apache.geode.cache.RegionEvent)7 Host (org.apache.geode.test.dunit.Host)7 IOException (java.io.IOException)6 CacheWriter (org.apache.geode.cache.CacheWriter)6 EntryEventImpl (org.apache.geode.internal.cache.EntryEventImpl)6 InternalGemFireError (org.apache.geode.InternalGemFireError)5