
Example 71 with ReplyException

Use of org.apache.geode.distributed.internal.ReplyException in project geode by apache.

The send method of the PrepareRevokePersistentIDRequest class.

private static void send(DM dm, PersistentMemberPattern pattern, boolean cancel) {
    Set recipients = dm.getOtherDistributionManagerIds();
    recipients.remove(dm.getId());
    PrepareRevokePersistentIDRequest request = new PrepareRevokePersistentIDRequest(pattern, cancel);
    request.setRecipients(recipients);
    AdminMultipleReplyProcessor replyProcessor = new AdminMultipleReplyProcessor(dm, recipients);
    request.msgId = replyProcessor.getProcessorId();
    dm.putOutgoing(request);
    try {
        replyProcessor.waitForReplies();
    } catch (ReplyException e) {
        if (e.getCause() instanceof CancelException) {
            // ignore
            return;
        }
        throw e;
    } catch (InterruptedException e) {
        logger.warn(e);
    }
    request.setSender(dm.getId());
    request.createResponse((DistributionManager) dm);
}
Also used : Set(java.util.Set) CancelException(org.apache.geode.CancelException) ReplyException(org.apache.geode.distributed.internal.ReplyException)
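
The requester side above wires the reply processor to the outgoing request through the processor id, sends the request, and then blocks in waitForReplies(); whatever Throwable a recipient wrapped into a ReplyException resurfaces there as the exception's cause. The following is a minimal sketch of just that wait-and-unwrap step, written against the generic ReplyProcessor21 from the same package rather than the admin-specific processor; the helper name and the interrupt handling are assumptions for illustration, not Geode API.

// Sketch only, not Geode API: isolates the wait-and-unwrap idiom from send(...).
// The processor is assumed to be already registered for the outgoing request,
// exactly as the example does with AdminMultipleReplyProcessor.
static void waitIgnoringShutdown(ReplyProcessor21 replyProcessor) {
    try {
        replyProcessor.waitForReplies();
    } catch (ReplyException e) {
        // The cause is whatever Throwable the remote member attached to its reply.
        if (e.getCause() instanceof CancelException) {
            // The remote cache was shutting down; the example treats this as benign.
            return;
        }
        throw e;
    } catch (InterruptedException e) {
        // Deviation from the example: restore the interrupt flag instead of only logging it.
        Thread.currentThread().interrupt();
    }
}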

Example 72 with ReplyException

Use of org.apache.geode.distributed.internal.ReplyException in project geode by apache.

The process method of the MembershipFlushRequest class.

@Override
protected void process(DistributionManager dm) {
    int initLevel = LocalRegion.ANY_INIT;
    int oldLevel = LocalRegion.setThreadInitLevelRequirement(initLevel);
    ReplyException exception = null;
    try {
        // get the region from the path, but do NOT wait on initialization,
        // otherwise we could have a distributed deadlock
        Cache cache = CacheFactory.getInstance(dm.getSystem());
        PartitionedRegion region = (PartitionedRegion) cache.getRegion(this.regionPath);
        if (region != null && region.getRegionAdvisor().isInitialized()) {
            ProxyBucketRegion[] proxyBuckets = region.getRegionAdvisor().getProxyBucketArray();
            // buckets are null if initPRInternals is still not complete
            if (proxyBuckets != null) {
                for (ProxyBucketRegion bucket : proxyBuckets) {
                    final BucketPersistenceAdvisor persistenceAdvisor = bucket.getPersistenceAdvisor();
                    if (persistenceAdvisor != null) {
                        persistenceAdvisor.flushMembershipChanges();
                    }
                }
            }
        }
    } catch (RegionDestroyedException e) {
    // ignore
    } catch (CancelException e) {
    // ignore
    } catch (VirtualMachineError e) {
        SystemFailure.initiateFailure(e);
        throw e;
    } catch (Throwable t) {
        SystemFailure.checkFailure();
        exception = new ReplyException(t);
    } finally {
        LocalRegion.setThreadInitLevelRequirement(oldLevel);
        ReplyMessage replyMsg = new ReplyMessage();
        replyMsg.setRecipient(getSender());
        replyMsg.setProcessorId(processorId);
        if (exception != null) {
            replyMsg.setException(exception);
        }
        dm.putOutgoing(replyMsg);
    }
}
Also used : BucketPersistenceAdvisor(org.apache.geode.internal.cache.BucketPersistenceAdvisor) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) ProxyBucketRegion(org.apache.geode.internal.cache.ProxyBucketRegion) CancelException(org.apache.geode.CancelException) ReplyException(org.apache.geode.distributed.internal.ReplyException) ReplyMessage(org.apache.geode.distributed.internal.ReplyMessage) Cache(org.apache.geode.cache.Cache)
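
Examples 72, 73 and 74 converge on the same receiver-side shape: do the work inside a try, turn any unexpected Throwable into a ReplyException (after letting SystemFailure react), and always send exactly one reply in the finally block so the processor waiting on the sending member is released. The sketch below condenses example 72's version of that template for a hypothetical message; the message-specific work is elided, and processorId is assumed to be a field of the message, as it is in the examples.

// Condensed sketch of the reply-on-all-paths template shared by the messages above.
@Override
protected void process(DistributionManager dm) {
    ReplyException exception = null;
    try {
        // ... message-specific work goes here (flush bucket membership changes,
        // destroy a Lucene index, and so on) ...
    } catch (RegionDestroyedException | CancelException e) {
        // benign shutdown or destroy races: still reply, but without an exception
    } catch (VirtualMachineError e) {
        // never swallow VM errors
        SystemFailure.initiateFailure(e);
        throw e;
    } catch (Throwable t) {
        SystemFailure.checkFailure();
        // relay the failure to the waiting member
        exception = new ReplyException(t);
    } finally {
        ReplyMessage replyMsg = new ReplyMessage();
        replyMsg.setRecipient(getSender());
        replyMsg.setProcessorId(processorId);
        if (exception != null) {
            replyMsg.setException(exception);
        }
        // exactly one reply on every path, success or failure
        dm.putOutgoing(replyMsg);
    }
}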

Example 73 with ReplyException

Use of org.apache.geode.distributed.internal.ReplyException in project geode by apache.

The process method of the PersistentStateQueryMessage class.

@Override
protected void process(DistributionManager dm) {
    // Set thread local flag to allow entrance through initialization Latch
    int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.ANY_INIT);
    PersistentMemberState state = null;
    PersistentMemberID myId = null;
    PersistentMemberID myInitializingId = null;
    DiskStoreID diskStoreId = null;
    HashSet<PersistentMemberID> onlineMembers = null;
    ReplyException exception = null;
    boolean successfulReply = false;
    try {
        // get the region from the path, but do NOT wait on initialization,
        // otherwise we could have a distributed deadlock
        Cache cache = CacheFactory.getInstance(dm.getSystem());
        Region region = cache.getRegion(this.regionPath);
        PersistenceAdvisor persistenceAdvisor = null;
        if (region instanceof DistributedRegion) {
            persistenceAdvisor = ((DistributedRegion) region).getPersistenceAdvisor();
        } else if (region == null) {
            Bucket proxy = PartitionedRegionHelper.getProxyBucketRegion(GemFireCacheImpl.getInstance(), this.regionPath, false);
            if (proxy != null) {
                persistenceAdvisor = proxy.getPersistenceAdvisor();
            }
        }
        if (persistenceAdvisor != null) {
            if (id != null) {
                state = persistenceAdvisor.getPersistedStateOfMember(id);
            }
            if (initializingId != null && state == null) {
                state = persistenceAdvisor.getPersistedStateOfMember(initializingId);
            }
            myId = persistenceAdvisor.getPersistentID();
            myInitializingId = persistenceAdvisor.getInitializingID();
            onlineMembers = persistenceAdvisor.getPersistedOnlineOrEqualMembers();
            diskStoreId = persistenceAdvisor.getDiskStoreID();
            successfulReply = true;
        }
    } catch (RegionDestroyedException e) {
        logger.debug("<RegionDestroyed> {}", this);
    } catch (CancelException e) {
        logger.debug("<CancelException> {}", this);
    } catch (VirtualMachineError e) {
        SystemFailure.initiateFailure(e);
        throw e;
    } catch (Throwable t) {
        SystemFailure.checkFailure();
        exception = new ReplyException(t);
    } finally {
        LocalRegion.setThreadInitLevelRequirement(oldLevel);
        ReplyMessage replyMsg;
        if (successfulReply) {
            PersistentStateQueryReplyMessage persistentReplyMessage = new PersistentStateQueryReplyMessage();
            persistentReplyMessage.myId = myId;
            persistentReplyMessage.persistedStateOfPeer = state;
            persistentReplyMessage.myInitializingId = myInitializingId;
            persistentReplyMessage.diskStoreId = diskStoreId;
            persistentReplyMessage.onlineMembers = onlineMembers;
            replyMsg = persistentReplyMessage;
        } else {
            replyMsg = new ReplyMessage();
        }
        replyMsg.setProcessorId(processorId);
        replyMsg.setRecipient(getSender());
        if (exception != null) {
            replyMsg.setException(exception);
        }
        if (logger.isDebugEnabled()) {
            logger.debug("Received {},replying with {}", this, replyMsg);
        }
        dm.putOutgoing(replyMsg);
    }
}
Also used : RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException) ReplyException (org.apache.geode.distributed.internal.ReplyException) ReplyMessage (org.apache.geode.distributed.internal.ReplyMessage) Bucket (org.apache.geode.internal.cache.partitioned.Bucket) LocalRegion (org.apache.geode.internal.cache.LocalRegion) DistributedRegion (org.apache.geode.internal.cache.DistributedRegion) Region (org.apache.geode.cache.Region) CancelException (org.apache.geode.CancelException) Cache (org.apache.geode.cache.Cache)
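
On the querying member, a matching reply processor typically overrides process to harvest the extra fields from PersistentStateQueryReplyMessage before delegating to the base class, which counts the reply, records any attached exception, and releases the thread blocked in waitForReplies(). The class below is only an illustration of that shape, not the real processor Geode pairs with this message; it assumes the ReplyProcessor21(DM, Collection) constructor and that the reply's fields are visible to the collector (in Geode the reply and its processor live in the same package). Imports are omitted; the type names match those used in the example.

// Illustrative collector only; not the processor Geode actually pairs with this message.
class PersistedStateCollector extends ReplyProcessor21 {

    final Map<InternalDistributedMember, PersistentMemberState> stateByMember =
        new ConcurrentHashMap<>();

    PersistedStateCollector(DM dm, Set<InternalDistributedMember> recipients) {
        // assumes the (DM, Collection) constructor of ReplyProcessor21
        super(dm, recipients);
    }

    @Override
    public void process(DistributionMessage msg) {
        if (msg instanceof PersistentStateQueryReplyMessage) {
            PersistentStateQueryReplyMessage reply = (PersistentStateQueryReplyMessage) msg;
            if (reply.persistedStateOfPeer != null) {
                stateByMember.put(reply.getSender(), reply.persistedStateOfPeer);
            }
        }
        // The base class does the bookkeeping: it counts the reply, records any
        // exception carried by a plain ReplyMessage, and wakes waitForReplies().
        super.process(msg);
    }
}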

Example 74 with ReplyException

Use of org.apache.geode.distributed.internal.ReplyException in project geode by apache.

The process method of the DestroyLuceneIndexMessage class.

@Override
protected void process(DistributionManager dm) {
    ReplyException replyException = null;
    try {
        if (logger.isDebugEnabled()) {
            logger.debug("DestroyLuceneIndexMessage: Destroying regionPath=" + this.regionPath + "; indexName=" + this.indexName);
        }
        try {
            InternalCache cache = GemFireCacheImpl.getInstance();
            LuceneServiceImpl impl = (LuceneServiceImpl) LuceneServiceProvider.get(cache);
            impl.destroyIndex(this.indexName, this.regionPath, false);
            if (logger.isDebugEnabled()) {
                logger.debug("DestroyLuceneIndexMessage: Destroyed regionPath=" + this.regionPath + "; indexName=" + this.indexName);
            }
        } catch (Throwable e) {
            replyException = new ReplyException(e);
            if (logger.isDebugEnabled()) {
                logger.debug("DestroyLuceneIndexMessage: Caught the following exception attempting to destroy indexName=" + this.indexName + "; regionPath=" + this.regionPath + ":", e);
            }
        }
    } finally {
        ReplyMessage replyMsg = new ReplyMessage();
        replyMsg.setRecipient(getSender());
        replyMsg.setProcessorId(this.processorId);
        if (replyException != null) {
            replyMsg.setException(replyException);
        }
        dm.putOutgoing(replyMsg);
    }
}
Also used : InternalCache(org.apache.geode.internal.cache.InternalCache) ReplyException(org.apache.geode.distributed.internal.ReplyException) ReplyMessage(org.apache.geode.distributed.internal.ReplyMessage)
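
The Throwable caught and wrapped above is what eventually surfaces on the member that initiated the destroy, as the cause of the ReplyException thrown from its reply processor's waitForReplies(). Below is a small hypothetical helper (not Geode API) showing how that relayed failure might be turned into a local exception while keeping the remote Throwable as the cause.

// Hypothetical helper, not part of Geode.
static void waitForRemoteIndexDestroy(ReplyProcessor21 processor) {
    try {
        processor.waitForReplies();
    } catch (ReplyException e) {
        // e.getCause() is the Throwable caught on the remote member (see the catch above)
        throw new IllegalStateException(
            "destroying the Lucene index failed on a remote member", e.getCause());
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IllegalStateException("interrupted while waiting for destroy replies", e);
    }
}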

Example 75 with ReplyException

Use of org.apache.geode.distributed.internal.ReplyException in project geode by apache.

The partitionedRegionTest method of the SerializableMonth class.

public void partitionedRegionTest(final String prName) {
    /*
     * Do put() operations through VM with PR having both Accessor and Datastore
     */
    vm0.invoke(new CacheSerializableRunnable("doPutCreateInvalidateOperations1") {

        public void run2() throws CacheException {
            Calendar cal = Calendar.getInstance();
            final Region pr = cache.getRegion(prName);
            if (pr == null) {
                fail(prName + " not created");
            }
            int size = 0;
            size = pr.size();
            assertEquals("Size doesnt return expected value", 0, size);
            assertEquals("isEmpty doesnt return proper state of the PartitionedRegion", true, pr.isEmpty());
            assertEquals(0, pr.keySet().size());
            for (int i = 0; i <= 11; i++) {
                int yr = (new Integer((int) (Math.random() * 2100))).intValue();
                int month = i;
                int date = (new Integer((int) (Math.random() * 30))).intValue();
                cal.set(yr, month, date);
                Object key = cal.getTime();
                listOfKeys1.add(key);
                assertNotNull(pr);
                pr.put(key, Integer.toString(i));
                assertEquals(Integer.toString(i), pr.get(key));
            }
            PartitionedRegion ppr = (PartitionedRegion) pr;
            try {
                ppr.dumpAllBuckets(false);
            } catch (ReplyException re) {
                Assert.fail("dumpAllBuckets", re);
            }
        }
    });
    vm1.invoke(new CacheSerializableRunnable("doPutCreateInvalidateOperations2") {

        public void run2() throws CacheException {
            Calendar cal = Calendar.getInstance();
            final Region pr = cache.getRegion(prName);
            if (pr == null) {
                fail(prName + " not created");
            }
            for (int i = 0; i <= 11; i++) {
                int yr = (new Integer((int) (Math.random() * 2200))).intValue();
                int month = i;
                int date = (new Integer((int) (Math.random() * 30))).intValue();
                cal.set(yr, month, date);
                Object key = cal.getTime();
                listOfKeys2.add(key);
                assertNotNull(pr);
                pr.put(key, Integer.toString(i));
                assertEquals(Integer.toString(i), pr.get(key));
            }
            PartitionedRegion ppr = (PartitionedRegion) pr;
            try {
                ppr.dumpAllBuckets(false);
            } catch (ReplyException re) {
                Assert.fail("dumpAllBuckets", re);
            }
        }
    });
    vm2.invoke(new CacheSerializableRunnable("doPutCreateInvalidateOperations2") {

        public void run2() throws CacheException {
            Calendar cal = Calendar.getInstance();
            final Region pr = cache.getRegion(prName);
            if (pr == null) {
                fail(prName + " not created");
            }
            for (int i = 0; i <= 11; i++) {
                int yr = (new Integer((int) (Math.random() * 2300))).intValue();
                int month = i;
                int date = (new Integer((int) (Math.random() * 30))).intValue();
                cal.set(yr, month, date);
                Object key = cal.getTime();
                listOfKeys3.add(key);
                assertNotNull(pr);
                pr.put(key, Integer.toString(i));
                assertEquals(Integer.toString(i), pr.get(key));
            }
            PartitionedRegion ppr = (PartitionedRegion) pr;
            try {
                ppr.dumpAllBuckets(false);
            } catch (ReplyException re) {
                Assert.fail("dumpAllBuckets", re);
            }
        }
    });
    vm3.invoke(new CacheSerializableRunnable("doPutCreateInvalidateOperations3") {

        public void run2() throws CacheException {
            Calendar cal = Calendar.getInstance();
            final Region pr = cache.getRegion(prName);
            if (pr == null) {
                fail(prName + " not created");
            }
            for (int i = 0; i <= 11; i++) {
                int yr = (new Integer((int) (Math.random() * 2400))).intValue();
                int month = i;
                int date = (new Integer((int) (Math.random() * 30))).intValue();
                cal.set(yr, month, date);
                Object key = cal.getTime();
                listOfKeys4.add(key);
                assertNotNull(pr);
                pr.put(key, Integer.toString(i));
                assertEquals(Integer.toString(i), pr.get(key));
            }
            PartitionedRegion ppr = (PartitionedRegion) pr;
            try {
                ppr.dumpAllBuckets(false);
            } catch (ReplyException re) {
                Assert.fail("dumpAllBuckets", re);
            }
        }
    });
    vm0.invoke(new CacheSerializableRunnable("verifyKeysonVM0") {

        public void run2() throws CacheException {
            // Calendar cal = Calendar.getInstance();
            final PartitionedRegion pr = (PartitionedRegion) cache.getRegion(prName);
            if (pr == null) {
                fail(prName + " not created");
            }
            Iterator itr = listOfKeys1.iterator();
            while (itr.hasNext()) {
                assertTrue(searchForKey(pr, (Date) itr.next()));
            }
            pr.getDataStore().visitBuckets(new BucketVisitor() {

                public void visit(Integer bucketId, Region r) {
                    Set s = pr.getBucketKeys(bucketId.intValue());
                    Iterator it = s.iterator();
                    while (it.hasNext()) {
                        EntryOperation eo = new EntryOperationImpl(pr, null, it.next(), null, null);
                        PartitionResolver rr = pr.getPartitionResolver();
                        Object o = rr.getRoutingObject(eo);
                        Integer i = new Integer(o.hashCode() % totalNumBuckets);
                        assertEquals(bucketId, i);
                    }
                // getLogWriter().severe("Key " + key + " found in bucket " + b);
                }
            });
        }
    });
    vm1.invoke(new CacheSerializableRunnable("verifyKeysonVM1") {

        public void run2() throws CacheException {
            // Calendar cal = Calendar.getInstance();
            final PartitionedRegion pr = (PartitionedRegion) cache.getRegion(prName);
            if (pr == null) {
                fail(prName + " not created");
            }
            Iterator itr = listOfKeys2.iterator();
            while (itr.hasNext()) {
                assertTrue(searchForKey(pr, (Date) itr.next()));
            }
            pr.getDataStore().visitBuckets(new BucketVisitor() {

                public void visit(Integer bucketId, Region r) {
                    Set s = pr.getBucketKeys(bucketId.intValue());
                    Iterator it = s.iterator();
                    while (it.hasNext()) {
                        EntryOperation eo = new EntryOperationImpl(pr, null, it.next(), null, null);
                        PartitionResolver rr = pr.getPartitionResolver();
                        Object o = rr.getRoutingObject(eo);
                        Integer i = new Integer(o.hashCode() % totalNumBuckets);
                        assertEquals(bucketId, i);
                    }
                // getLogWriter().severe("Key " + key + " found in bucket " + b);
                }
            });
        }
    });
    vm2.invoke(new CacheSerializableRunnable("verifyKeysonVM2") {

        public void run2() throws CacheException {
            // Calendar cal = Calendar.getInstance();
            final PartitionedRegion pr = (PartitionedRegion) cache.getRegion(prName);
            if (pr == null) {
                fail(prName + " not created");
            }
            Iterator itr = listOfKeys3.iterator();
            itr = listOfKeys3.iterator();
            while (itr.hasNext()) {
                assertTrue(searchForKey(pr, (Date) itr.next()));
            }
            pr.getDataStore().visitBuckets(new BucketVisitor() {

                public void visit(Integer bucketId, Region r) {
                    Set s = pr.getBucketKeys(bucketId.intValue());
                    Iterator it = s.iterator();
                    while (it.hasNext()) {
                        EntryOperation eo = new EntryOperationImpl(pr, null, it.next(), null, null);
                        PartitionResolver rr = pr.getPartitionResolver();
                        Object o = rr.getRoutingObject(eo);
                        Integer i = new Integer(o.hashCode() % totalNumBuckets);
                        // assertIndexDetailsEquals(bucketId, bucketId);
                        assertEquals(bucketId, i);
                    }
                // getLogWriter().severe("Key " + key + " found in bucket " + b);
                }
            });
        }
    });
    vm3.invoke(new CacheSerializableRunnable("verifyKeysonVM3") {

        public void run2() throws CacheException {
            // Calendar cal = Calendar.getInstance();
            final PartitionedRegion pr = (PartitionedRegion) cache.getRegion(prName);
            if (pr == null) {
                fail(prName + " not created");
            }
            Iterator itr = listOfKeys4.iterator();
            itr = listOfKeys4.iterator();
            while (itr.hasNext()) {
                assertTrue(searchForKey(pr, (Date) itr.next()));
            }
            assertEquals(pr.getDataStore(), null);
        }
    });
}
Also used : BucketVisitor (org.apache.geode.internal.cache.PartitionedRegionDataStore.BucketVisitor) ReplyException (org.apache.geode.distributed.internal.ReplyException) EntryOperationImpl (org.apache.geode.internal.cache.EntryOperationImpl) CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable) PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion)
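
In this test, a remote failure during dumpAllBuckets arrives wrapped in a ReplyException and is converted into a test failure via Assert.fail. A minimal standalone version of that unwrap-and-fail idiom (the partitionedRegion variable is assumed for illustration) could look like this:

// Sketch only: fail fast with the remote cause visible instead of the wrapper.
try {
    partitionedRegion.dumpAllBuckets(false);
} catch (ReplyException re) {
    throw new AssertionError("dumpAllBuckets failed on a remote member: " + re.getCause(), re);
}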

Aggregations

Types that appear together with ReplyException across the indexed examples, with their usage counts:

ReplyException (org.apache.geode.distributed.internal.ReplyException): 75
CancelException (org.apache.geode.CancelException): 24
InternalDistributedMember (org.apache.geode.distributed.internal.membership.InternalDistributedMember): 20
Set (java.util.Set): 16
RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException): 16
HashSet (java.util.HashSet): 12
ForceReattemptException (org.apache.geode.internal.cache.ForceReattemptException): 10
EntryNotFoundException (org.apache.geode.cache.EntryNotFoundException): 8
PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion): 8
IOException (java.io.IOException): 7
CacheClosedException (org.apache.geode.cache.CacheClosedException): 7
ReplyMessage (org.apache.geode.distributed.internal.ReplyMessage): 7
Cache (org.apache.geode.cache.Cache): 6
CacheException (org.apache.geode.cache.CacheException): 6
Region (org.apache.geode.cache.Region): 6
PartitionedRegionDataStore (org.apache.geode.internal.cache.PartitionedRegionDataStore): 6
PrimaryBucketException (org.apache.geode.internal.cache.PrimaryBucketException): 6
Released (org.apache.geode.internal.offheap.annotations.Released): 6
DistributedSystemDisconnectedException (org.apache.geode.distributed.DistributedSystemDisconnectedException): 5
ReplyProcessor21 (org.apache.geode.distributed.internal.ReplyProcessor21): 5