Example 6 with PRLocallyDestroyedException

Use of org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException in project geode by apache.

From the class UpdateVersionDUnitTest, the method testUpdateVersionAfterCreateWithParallelSender:

@Test
public void testUpdateVersionAfterCreateWithParallelSender() {
    Host host = Host.getHost(0);
    // server1 site1
    VM vm0 = host.getVM(0);
    // server2 site1
    VM vm1 = host.getVM(1);
    // server1 site2
    VM vm2 = host.getVM(2);
    // server2 site2
    VM vm3 = host.getVM(3);
    // Site 1
    Integer lnPort = (Integer) vm0.invoke(() -> UpdateVersionDUnitTest.createFirstLocatorWithDSId(1));
    final String key = "key-1";
    vm0.invoke(() -> UpdateVersionDUnitTest.createCache(lnPort));
    vm0.invoke(() -> UpdateVersionDUnitTest.createSender("ln1", 2, true, 10, 1, false, false, null, true));
    vm0.invoke(() -> UpdateVersionDUnitTest.createPartitionedRegion(regionName, "ln1", 1, 1));
    vm0.invoke(() -> UpdateVersionDUnitTest.startSender("ln1"));
    vm0.invoke(() -> UpdateVersionDUnitTest.waitForSenderRunningState("ln1"));
    // Site 2
    Integer nyPort = (Integer) vm2.invoke(() -> UpdateVersionDUnitTest.createFirstRemoteLocator(2, lnPort));
    Integer nyRecPort = (Integer) vm2.invoke(() -> UpdateVersionDUnitTest.createReceiver(nyPort));
    vm2.invoke(() -> UpdateVersionDUnitTest.createPartitionedRegion(regionName, "", 1, 1));
    vm3.invoke(() -> UpdateVersionDUnitTest.createCache(nyPort));
    vm3.invoke(() -> UpdateVersionDUnitTest.createPartitionedRegion(regionName, "", 1, 1));
    final VersionTag tag = (VersionTag) vm0.invoke(new SerializableCallable("Put a single entry and get its version") {

        @Override
        public Object call() throws CacheException {
            Cache cache = CacheFactory.getAnyInstance();
            Region region = cache.getRegion(regionName);
            assertTrue(region instanceof PartitionedRegion);
            region.put(key, "value-1");
            region.put(key, "value-2");
            Entry entry = region.getEntry(key);
            assertTrue(entry instanceof EntrySnapshot);
            RegionEntry regionEntry = ((EntrySnapshot) entry).getRegionEntry();
            VersionStamp stamp = regionEntry.getVersionStamp();
            // Create a duplicate entry version tag from stamp with newer
            // time-stamp.
            VersionSource memberId = (VersionSource) cache.getDistributedSystem().getDistributedMember();
            VersionTag tag = VersionTag.create(memberId);
            int entryVersion = stamp.getEntryVersion() - 1;
            int dsid = stamp.getDistributedSystemId();
            long time = System.currentTimeMillis();
            tag.setEntryVersion(entryVersion);
            tag.setDistributedSystemId(dsid);
            tag.setVersionTimeStamp(time);
            tag.setIsRemoteForTesting();
            EntryEventImpl event = createNewEvent((PartitionedRegion) region, tag, entry.getKey(), "value-3");
            ((LocalRegion) region).basicUpdate(event, false, true, 0L, false);
            // Verify the new stamp
            entry = region.getEntry(key);
            assertTrue(entry instanceof EntrySnapshot);
            regionEntry = ((EntrySnapshot) entry).getRegionEntry();
            stamp = regionEntry.getVersionStamp();
            assertEquals("Time stamp did NOT get updated by UPDATE_VERSION operation on LocalRegion", time, stamp.getVersionTimeStamp());
            assertEquals(++entryVersion, stamp.getEntryVersion());
            assertEquals(dsid, stamp.getDistributedSystemId());
            return stamp.asVersionTag();
        }
    });
    VersionTag remoteTag = (VersionTag) vm3.invoke(new SerializableCallable("Get timestamp from remote site") {

        @Override
        public Object call() throws Exception {
            Cache cache = CacheFactory.getAnyInstance();
            final PartitionedRegion region = (PartitionedRegion) cache.getRegion(regionName);
            // wait for entry to be received
            WaitCriterion wc = new WaitCriterion() {

                public boolean done() {
                    Entry<?, ?> entry = null;
                    try {
                        entry = region.getDataStore().getEntryLocally(0, key, false, false);
                    } catch (EntryNotFoundException e) {
                        // expected
                    } catch (ForceReattemptException e) {
                        // expected
                    } catch (PRLocallyDestroyedException e) {
                        throw new RuntimeException("unexpected exception", e);
                    }
                    if (entry != null) {
                        LogWriterUtils.getLogWriter().info("found entry " + entry);
                    }
                    return (entry != null);
                }

                public String description() {
                    return "Expected key-1 to be received on remote WAN site";
                }
            };
            Wait.waitForCriterion(wc, 30000, 500, true);
            wc = new WaitCriterion() {

                public boolean done() {
                    Entry entry = region.getEntry(key);
                    assertTrue(entry instanceof EntrySnapshot);
                    RegionEntry regionEntry = ((EntrySnapshot) entry).getRegionEntry();
                    return regionEntry.getVersionStamp().getVersionTimeStamp() == tag.getVersionTimeStamp();
                }

                public String description() {
                    return "waiting for timestamp to be updated";
                }
            };
            Wait.waitForCriterion(wc, 30000, 500, true);
            Entry entry = region.getEntry(key);
            assertTrue(entry instanceof EntrySnapshot);
            RegionEntry regionEntry = ((EntrySnapshot) entry).getRegionEntry();
            VersionStamp stamp = regionEntry.getVersionStamp();
            return stamp.asVersionTag();
        }
    });
    assertEquals("Local and remote site have different timestamps", tag.getVersionTimeStamp(), remoteTag.getVersionTimeStamp());
}
Also used : Host(org.apache.geode.test.dunit.Host) VersionStamp(org.apache.geode.internal.cache.versions.VersionStamp) Entry(org.apache.geode.cache.Region.Entry) NonTXEntry(org.apache.geode.internal.cache.LocalRegion.NonTXEntry) WaitCriterion(org.apache.geode.test.dunit.WaitCriterion) VersionSource(org.apache.geode.internal.cache.versions.VersionSource) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) VersionTag(org.apache.geode.internal.cache.versions.VersionTag) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) PRLocallyDestroyedException(org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException) Region(org.apache.geode.cache.Region) Cache(org.apache.geode.cache.Cache) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) Test(org.junit.Test)
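
The version-inspection pattern the test relies on (EntrySnapshot → RegionEntry → VersionStamp → VersionTag) can be pulled out into a small helper. The following is a sketch only, assuming a versioned partitioned region; VersionTagInspector and readVersionTag are illustrative names, not part of Geode.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.internal.cache.EntrySnapshot;
import org.apache.geode.internal.cache.RegionEntry;
import org.apache.geode.internal.cache.versions.VersionStamp;
import org.apache.geode.internal.cache.versions.VersionTag;

public class VersionTagInspector {

    // Returns the version tag currently stored for key, or null if the entry is absent
    // or the region does not hand back EntrySnapshot instances.
    public static VersionTag readVersionTag(String regionName, Object key) {
        Cache cache = CacheFactory.getAnyInstance();
        Region region = cache.getRegion(regionName);
        Region.Entry entry = region.getEntry(key);
        if (!(entry instanceof EntrySnapshot)) {
            return null; // partitioned regions return EntrySnapshot entries
        }
        RegionEntry regionEntry = ((EntrySnapshot) entry).getRegionEntry();
        VersionStamp stamp = regionEntry.getVersionStamp();
        return stamp == null ? null : stamp.asVersionTag();
    }
}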

Example 7 with PRLocallyDestroyedException

Use of org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException in project geode by apache.

From the class PartitionedRegion, the method getEntryInBucket:

protected EntrySnapshot getEntryInBucket(final DistributedMember targetNode, final int bucketId, final Object key, boolean access, final boolean allowTombstones) {
    final int retryAttempts = calcRetry();
    if (logger.isTraceEnabled()) {
        logger.trace("getEntryInBucket: " + "Key key={} ({}) from: {} bucketId={}", key, key.hashCode(), targetNode, bucketStringForLogs(bucketId));
    }
    Integer bucketIdInt = bucketId;
    EntrySnapshot ret = null;
    int count = 0;
    RetryTimeKeeper retryTime = null;
    InternalDistributedMember retryNode = (InternalDistributedMember) targetNode;
    while (count <= retryAttempts) {
        // Every continuation should check for DM cancellation
        if (retryNode == null) {
            checkReadiness();
            if (retryTime == null) {
                retryTime = new RetryTimeKeeper(this.retryTimeout);
            }
            if (retryTime.overMaximum()) {
                break;
            }
            retryNode = getOrCreateNodeForBucketRead(bucketId);
            // No storage found for bucket, early out preventing hot loop, bug 36819
            if (retryNode == null) {
                checkShutdown();
                return null;
            }
            continue;
        }
        try {
            final boolean loc = (this.localMaxMemory > 0) && retryNode.equals(getMyId());
            if (loc) {
                ret = this.dataStore.getEntryLocally(bucketId, key, access, allowTombstones);
            } else {
                ret = getEntryRemotely(retryNode, bucketIdInt, key, access, allowTombstones);
                // TODO:Suranjan&Yogesh : there should be better way than this one
                String name = Thread.currentThread().getName();
                if (name.startsWith("ServerConnection") && !getMyId().equals(targetNode)) {
                    setNetworkHopType(bucketIdInt, (InternalDistributedMember) targetNode);
                }
            }
            return ret;
        } catch (PRLocallyDestroyedException pde) {
            if (logger.isDebugEnabled()) {
                logger.debug("getEntryInBucket: Encountered PRLocallyDestroyedException", pde);
            }
            checkReadiness();
        } catch (EntryNotFoundException ignore) {
            return null;
        } catch (ForceReattemptException prce) {
            prce.checkKey(key);
            if (logger.isDebugEnabled()) {
                logger.debug("getEntryInBucket: retrying, attempts so far: {}", count, prce);
            }
            checkReadiness();
            InternalDistributedMember lastNode = retryNode;
            retryNode = getOrCreateNodeForBucketRead(bucketIdInt);
            if (lastNode.equals(retryNode)) {
                if (retryTime == null) {
                    retryTime = new RetryTimeKeeper(this.retryTimeout);
                }
                if (retryTime.overMaximum()) {
                    break;
                }
                retryTime.waitToRetryNode();
            }
        } catch (PrimaryBucketException notPrimary) {
            if (logger.isDebugEnabled()) {
                logger.debug("Bucket {} on Node {} not primary", notPrimary.getLocalizedMessage(), retryNode);
            }
            getRegionAdvisor().notPrimary(bucketIdInt, retryNode);
            retryNode = getOrCreateNodeForBucketRead(bucketIdInt);
        }
        // It's possible this is a GemFire thread e.g. ServerConnection
        // which got to this point because of a distributed system shutdown or
        // region closure which uses interrupt to break any sleep() or wait()
        // calls
        // e.g. waitForPrimary
        checkShutdown();
        count++;
        if (count == 1) {
            this.prStats.incContainsKeyValueOpsRetried();
        }
        this.prStats.incContainsKeyValueRetries();
    }
    // Fix for bug 36014
    PartitionedRegionDistributionException e = null;
    if (logger.isDebugEnabled()) {
        e = new PartitionedRegionDistributionException(LocalizedStrings.PartitionRegion_NO_VM_AVAILABLE_FOR_GETENTRY_IN_0_ATTEMPTS.toLocalizedString(count));
    }
    logger.warn(LocalizedMessage.create(LocalizedStrings.PartitionRegion_NO_VM_AVAILABLE_FOR_GETENTRY_IN_0_ATTEMPTS, count), e);
    return null;
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) PRLocallyDestroyedException(org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) PartitionedRegionDistributionException(org.apache.geode.cache.PartitionedRegionDistributionException)
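
Stripped of the Geode-specific bookkeeping, getEntryInBucket follows a simple retry shape: attempt a read against one bucket owner, treat PRLocallyDestroyedException and ForceReattemptException as signals to pick another node, and give up after a bounded number of attempts. The sketch below restates only that control flow; BucketReader and readWithRetry are hypothetical names, and the import path for ForceReattemptException is assumed to be org.apache.geode.internal.cache.

import org.apache.geode.internal.cache.ForceReattemptException;
import org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException;

public class BucketReadRetrier {

    // Hypothetical abstraction over a single read attempt against one bucket owner.
    public interface BucketReader<T> {
        T readOnce() throws PRLocallyDestroyedException, ForceReattemptException;
    }

    // Retry a bucket read up to maxAttempts times, treating PRLocallyDestroyedException
    // and ForceReattemptException as "pick another node and try again".
    public static <T> T readWithRetry(BucketReader<T> reader, int maxAttempts)
            throws InterruptedException {
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            try {
                return reader.readOnce();
            } catch (PRLocallyDestroyedException | ForceReattemptException e) {
                // The real code re-resolves the bucket owner and checks for shutdown here;
                // this sketch just backs off briefly before the next attempt.
                Thread.sleep(100L);
            }
        }
        return null; // mirrors getEntryInBucket returning null after exhausting retries
    }
}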

Example 8 with PRLocallyDestroyedException

Use of org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException in project geode by apache.

From the class PartitionedRegion, the method getBucketKeys:

/**
   * Fetch the keys for the given bucket identifier, whether the bucket is local or remote. This
   * version of the method allows you to retrieve tombstone entries as well as undestroyed entries.
   * 
   * @param allowTombstones whether to include destroyed entries in the result
   * @return a set of keys from bucketNum, or {@link Collections#EMPTY_SET} if no keys can be found
   */
public Set getBucketKeys(int bucketNum, boolean allowTombstones) {
    Integer buck = bucketNum;
    final int retryAttempts = calcRetry();
    Set ret = null;
    int count = 0;
    InternalDistributedMember nod = getOrCreateNodeForBucketRead(bucketNum);
    RetryTimeKeeper snoozer = null;
    while (count <= retryAttempts) {
        // It's possible this is a GemFire thread e.g. ServerConnection
        // which got to this point because of a distributed system shutdown or
        // region closure which uses interrupt to break any sleep() or wait()
        // calls
        // e.g. waitForPrimary or waitForBucketRecovery
        checkShutdown();
        if (nod == null) {
            if (snoozer == null) {
                snoozer = new RetryTimeKeeper(this.retryTimeout);
            }
            nod = getOrCreateNodeForBucketRead(bucketNum);
            // No storage found for bucket, early out preventing hot loop, bug 36819
            if (nod == null) {
                checkShutdown();
                break;
            }
            count++;
            continue;
        }
        try {
            if (nod.equals(getMyId())) {
                ret = this.dataStore.getKeysLocally(buck, allowTombstones);
            } else {
                FetchKeysResponse r = FetchKeysMessage.send(nod, this, buck, allowTombstones);
                ret = r.waitForKeys();
            }
            if (ret != null) {
                return ret;
            }
        } catch (PRLocallyDestroyedException ignore) {
            if (logger.isDebugEnabled()) {
                logger.debug("getBucketKeys: Encountered PRLocallyDestroyedException");
            }
            checkReadiness();
        } catch (ForceReattemptException prce) {
            if (logger.isDebugEnabled()) {
                logger.debug("getBucketKeys: attempt:{}", (count + 1), prce);
            }
            checkReadiness();
            if (snoozer == null) {
                snoozer = new RetryTimeKeeper(this.retryTimeout);
            }
            InternalDistributedMember oldNode = nod;
            nod = getNodeForBucketRead(buck);
            if (nod != null && nod.equals(oldNode)) {
                if (snoozer.overMaximum()) {
                    checkReadiness();
                    throw new TimeoutException(LocalizedStrings.PartitionedRegion_ATTEMPT_TO_ACQUIRE_PRIMARY_NODE_FOR_READ_ON_BUCKET_0_TIMED_OUT_IN_1_MS.toLocalizedString(new Object[] { getBucketName(buck), snoozer.getRetryTime() }));
                }
                snoozer.waitToRetryNode();
            }
        }
        count++;
    }
    if (logger.isDebugEnabled()) {
        logger.debug("getBucketKeys: no keys found returning empty set");
    }
    return Collections.emptySet();
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) HashSet(java.util.HashSet) Set(java.util.Set) ResultsSet(org.apache.geode.cache.query.internal.ResultsSet) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) PRLocallyDestroyedException(org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException) FetchKeysResponse(org.apache.geode.internal.cache.partitioned.FetchKeysMessage.FetchKeysResponse) TimeoutException(org.apache.geode.cache.TimeoutException)
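
A usage sketch for the public getBucketKeys method shown above, assuming a cache member that hosts or can reach the partitioned region; BucketKeyDump, regionPath and bucketId are placeholders.

import java.util.Set;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.internal.cache.PartitionedRegion;

public class BucketKeyDump {

    // Prints every key (including tombstones) stored in one bucket of a partitioned region.
    public static void dumpBucket(String regionPath, int bucketId) {
        Cache cache = CacheFactory.getAnyInstance();
        // Assumes regionPath names a partitioned region on this member.
        PartitionedRegion pr = (PartitionedRegion) cache.getRegion(regionPath);
        Set keys = pr.getBucketKeys(bucketId, true); // true => include tombstones
        for (Object key : keys) {
            System.out.println("bucket " + bucketId + " key: " + key);
        }
    }
}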

Example 9 with PRLocallyDestroyedException

Use of org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException in project geode by apache.

From the class ColocationHelper, the method getColocatedChildRegions:

/**
   * A utility method to retrieve all child partitioned regions that are directly colocated with
   * the specified partitioned region.<br>
   * <p>
   * For example, shipmentPR is colocated with orderPR and orderPR is colocated with customerPR.
   * <br>
   * getColocatedChildRegions(customerPR) will return List{orderPR}<br>
   * getColocatedChildRegions(orderPR) will return List{shipmentPR}<br>
   * getColocatedChildRegions(shipmentPR) will return empty List{}<br>
   * 
   * @return list of all child partitioned regions colocated with the region
   * @since GemFire 5.8Beta
   */
public static List<PartitionedRegion> getColocatedChildRegions(PartitionedRegion partitionedRegion) {
    List<PartitionedRegion> colocatedChildRegions = new ArrayList<PartitionedRegion>();
    Region prRoot = PartitionedRegionHelper.getPRRoot(partitionedRegion.getCache());
    PartitionRegionConfig prConf = null;
    // final List allPRNamesList = new ArrayList(prRoot.keySet());
    Iterator itr = prRoot.keySet().iterator();
    while (itr.hasNext()) {
        try {
            String prName = (String) itr.next();
            if (prName.equals(partitionedRegion.getRegionIdentifier())) {
                // region can't be a child of itself
                continue;
            }
            try {
                prConf = (PartitionRegionConfig) prRoot.get(prName);
            } catch (EntryDestroyedException ignore) {
                continue;
            }
            if (prConf == null) {
                // prConf can be null (entry removed concurrently); skip it and continue
                continue;
            }
            int prID = prConf.getPRId();
            PartitionedRegion prRegion = PartitionedRegion.getPRFromId(prID);
            if (prRegion != null) {
                if (prRegion.getColocatedWith() != null) {
                    if (prRegion.getColocatedWith().equals(partitionedRegion.getFullPath()) || ("/" + prRegion.getColocatedWith()).equals(partitionedRegion.getFullPath())) {
                        // only regions directly colocatedWith partitionedRegion are
                        // added to the list...
                        prRegion.waitOnBucketMetadataInitialization();
                        colocatedChildRegions.add(prRegion);
                    }
                }
            }
        } catch (PRLocallyDestroyedException e) {
            if (logger.isDebugEnabled()) {
                logger.debug("PRLocallyDestroyedException : Region ={} is locally destroyed on this node", prConf.getPRId(), e);
            }
        } catch (RegionDestroyedException e) {
            if (logger.isDebugEnabled()) {
                logger.debug("RegionDestroyedException : Region ={} is destroyed.", prConf.getPRId(), e);
            }
        }
    }
    // Fix for 44484 - make sure the list of colocated child regions
    // is always in the same order on all nodes.
    Collections.sort(colocatedChildRegions, new Comparator<PartitionedRegion>() {

        @Override
        public int compare(PartitionedRegion o1, PartitionedRegion o2) {
            if (o1.isShadowPR() == o2.isShadowPR()) {
                return o1.getFullPath().compareTo(o2.getFullPath());
            }
            if (o1.isShadowPR()) {
                return 1;
            }
            return -1;
        }
    });
    return colocatedChildRegions;
}
Also used : EntryDestroyedException(org.apache.geode.cache.EntryDestroyedException) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) ArrayList(java.util.ArrayList) Iterator(java.util.Iterator) PRLocallyDestroyedException(org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException) Region(org.apache.geode.cache.Region)
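
A usage sketch for the helper above, assuming the caller already holds a PartitionedRegion reference on a member where the colocated regions are defined; ColocationWalker and listDirectChildren are illustrative names.

import java.util.List;
import org.apache.geode.internal.cache.ColocationHelper;
import org.apache.geode.internal.cache.PartitionedRegion;

public class ColocationWalker {

    // Logs the full path of every region directly colocated with the given parent.
    public static void listDirectChildren(PartitionedRegion parent) {
        List<PartitionedRegion> children = ColocationHelper.getColocatedChildRegions(parent);
        for (PartitionedRegion child : children) {
            System.out.println(parent.getFullPath() + " <- colocated child: " + child.getFullPath());
        }
    }
}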

Example 10 with PRLocallyDestroyedException

Use of org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException in project geode by apache.

From the class PartitionedRegion, the method getFromBucket:

/**
   * @param requestingClient the client requesting the object, or null if not from a client
   * @param allowRetry if false then do not retry
   */
private Object getFromBucket(final InternalDistributedMember targetNode, int bucketId, final Object key, final Object aCallbackArgument, boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones, boolean allowRetry) {
    final boolean isDebugEnabled = logger.isDebugEnabled();
    final int retryAttempts = calcRetry();
    Object obj;
    // retry the get remotely until it finds the right node managing the bucket
    int count = 0;
    RetryTimeKeeper retryTime = null;
    InternalDistributedMember retryNode = targetNode;
    while (count <= retryAttempts) {
        // Every continuation should check for DM cancellation
        if (retryNode == null) {
            checkReadiness();
            if (retryTime == null) {
                retryTime = new RetryTimeKeeper(this.retryTimeout);
            }
            retryNode = getNodeForBucketReadOrLoad(bucketId);
            // No storage found for bucket, early out preventing hot loop, bug 36819
            if (retryNode == null) {
                checkShutdown();
                return null;
            }
            continue;
        }
        final boolean isLocal = this.localMaxMemory > 0 && retryNode.equals(getMyId());
        try {
            if (isLocal) {
                obj = this.dataStore.getLocally(bucketId, key, aCallbackArgument, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones, false);
            } else {
                if (this.haveCacheLoader) {
                    /* MergeGemXDHDFSToGFE - reading from the local bucket was disabled in GemXD */
                    if (null != (obj = getFromLocalBucket(bucketId, key, aCallbackArgument, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones))) {
                        return obj;
                    }
                }
                obj = getRemotely(retryNode, bucketId, key, aCallbackArgument, preferCD, requestingClient, clientEvent, returnTombstones);
                // TODO: there should be better way than this one
                String name = Thread.currentThread().getName();
                if (name.startsWith("ServerConnection") && !getMyId().equals(retryNode)) {
                    setNetworkHopType(bucketId, (InternalDistributedMember) retryNode);
                }
            }
            return obj;
        } catch (PRLocallyDestroyedException pde) {
            if (isDebugEnabled) {
                logger.debug("getFromBucket Encountered PRLocallyDestroyedException", pde);
            }
            checkReadiness();
            if (allowRetry) {
                retryNode = getNodeForBucketReadOrLoad(bucketId);
            } else {
                // Only transactions set allowRetry to false;
                // fail the transaction here since the region is destroyed.
                Throwable cause = pde.getCause();
                if (cause != null && cause instanceof RegionDestroyedException) {
                    throw (RegionDestroyedException) cause;
                } else {
                    // no RegionDestroyedException cause available; throw a new one for this region.
                    throw new RegionDestroyedException(toString(), getFullPath());
                }
            }
        } catch (ForceReattemptException prce) {
            prce.checkKey(key);
            checkReadiness();
            if (allowRetry) {
                InternalDistributedMember lastNode = retryNode;
                if (isDebugEnabled) {
                    logger.debug("getFromBucket: retry attempt: {} of {}", count, retryAttempts, prce);
                }
                retryNode = getNodeForBucketReadOrLoad(bucketId);
                if (lastNode.equals(retryNode)) {
                    if (retryTime == null) {
                        retryTime = new RetryTimeKeeper(this.retryTimeout);
                    }
                    if (retryTime.overMaximum()) {
                        break;
                    }
                    if (isDebugEnabled) {
                        logger.debug("waiting to retry node {}", retryNode);
                    }
                    retryTime.waitToRetryNode();
                }
            } else {
                // with transaction
                if (prce instanceof BucketNotFoundException) {
                    throw new TransactionDataRebalancedException(LocalizedStrings.PartitionedRegion_TRANSACTIONAL_DATA_MOVED_DUE_TO_REBALANCING.toLocalizedString(key), prce);
                }
                Throwable cause = prce.getCause();
                if (cause instanceof PrimaryBucketException) {
                    throw (PrimaryBucketException) cause;
                } else if (cause instanceof TransactionDataRebalancedException) {
                    throw (TransactionDataRebalancedException) cause;
                } else if (cause instanceof RegionDestroyedException) {
                    throw new TransactionDataRebalancedException(LocalizedStrings.PartitionedRegion_TRANSACTIONAL_DATA_MOVED_DUE_TO_REBALANCING.toLocalizedString(key), cause);
                } else {
                    // Should not happen currently; added to protect against future changes.
                    throw new TransactionException("Failed to get key: " + key, prce);
                }
            }
        } catch (PrimaryBucketException notPrimary) {
            if (allowRetry) {
                if (isDebugEnabled) {
                    logger.debug("getFromBucket: {} on Node {} not primary", notPrimary.getLocalizedMessage(), retryNode);
                }
                getRegionAdvisor().notPrimary(bucketId, retryNode);
                retryNode = getNodeForBucketReadOrLoad(bucketId);
            } else {
                throw notPrimary;
            }
        }
        // It's possible this is a GemFire thread e.g. ServerConnection
        // which got to this point because of a distributed system shutdown or
        // region closure which uses interrupt to break any sleep() or wait()
        // calls
        // e.g. waitForPrimary
        checkShutdown();
        count++;
        if (count == 1) {
            this.prStats.incGetOpsRetried();
        }
        this.prStats.incGetRetries();
        if (isDebugEnabled) {
            logger.debug("getFromBucket: Attempting to resend get to node {} after {} failed attempts", retryNode, count);
        }
    }
    // While
    // Fix for bug 36014
    PartitionedRegionDistributionException e = null;
    if (logger.isDebugEnabled()) {
        e = new PartitionedRegionDistributionException(LocalizedStrings.PartitionRegion_NO_VM_AVAILABLE_FOR_GET_IN_0_ATTEMPTS.toLocalizedString(count));
    }
    logger.warn(LocalizedMessage.create(LocalizedStrings.PartitionRegion_NO_VM_AVAILABLE_FOR_GET_IN_0_ATTEMPTS, count), e);
    return null;
}
Also used : RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) TransactionDataRebalancedException(org.apache.geode.cache.TransactionDataRebalancedException) TransactionException(org.apache.geode.cache.TransactionException) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) PRLocallyDestroyedException(org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException) PartitionedRegionDistributionException(org.apache.geode.cache.PartitionedRegionDistributionException)
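
On the transactional path (allowRetry == false), getFromBucket does not loop: it converts a PRLocallyDestroyedException into a RegionDestroyedException so the transaction fails fast. The sketch below restates just that translation; TransactionGetFailures and translateForTransaction are illustrative names, not Geode APIs.

import org.apache.geode.cache.RegionDestroyedException;
import org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException;

public final class TransactionGetFailures {

    // Maps a PRLocallyDestroyedException onto the RegionDestroyedException a transaction expects.
    public static RegionDestroyedException translateForTransaction(
            PRLocallyDestroyedException pde, String regionName, String regionPath) {
        Throwable cause = pde.getCause();
        if (cause instanceof RegionDestroyedException) {
            return (RegionDestroyedException) cause; // preserve the original cause when present
        }
        // Otherwise synthesize one, as getFromBucket does when no cause is attached.
        return new RegionDestroyedException(regionName, regionPath);
    }
}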

Aggregations

PRLocallyDestroyedException (org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException) 13
InternalDistributedMember (org.apache.geode.distributed.internal.membership.InternalDistributedMember) 6
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 5
Region (org.apache.geode.cache.Region) 5
HashSet (java.util.HashSet) 4
Set (java.util.Set) 4
EntryNotFoundException (org.apache.geode.cache.EntryNotFoundException) 4
RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException) 4
ResultsSet (org.apache.geode.cache.query.internal.ResultsSet) 4
Cache (org.apache.geode.cache.Cache) 3
PartitionedRegionDistributionException (org.apache.geode.cache.PartitionedRegionDistributionException) 3
Entry (org.apache.geode.cache.Region.Entry) 3
NonTXEntry (org.apache.geode.internal.cache.LocalRegion.NonTXEntry) 3
FetchKeysResponse (org.apache.geode.internal.cache.partitioned.FetchKeysMessage.FetchKeysResponse) 3
VersionSource (org.apache.geode.internal.cache.versions.VersionSource) 3
VersionStamp (org.apache.geode.internal.cache.versions.VersionStamp) 3
VersionTag (org.apache.geode.internal.cache.versions.VersionTag) 3
Host (org.apache.geode.test.dunit.Host) 3
SerializableCallable (org.apache.geode.test.dunit.SerializableCallable) 3
VM (org.apache.geode.test.dunit.VM) 3