Usage of org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException in the project geode by apache:
from the class PartitionedRegion, method getSomeKeys.
/**
 * Test Method: Get a random set of keys from a randomly selected bucket using the provided
 * {@code Random} number generator.
 *
 * @param rnd source of randomness used to pick the bucket to sample
 * @return A set of keys from a randomly chosen bucket or {@link Collections#EMPTY_SET}
 */
public Set getSomeKeys(Random rnd) throws IOException, ClassNotFoundException {
  InternalDistributedMember member = null;
  // Declared outside the try so the ForceReattemptException handler can log it.
  Integer bucketId = null;
  Set bucketSet = getRegionAdvisor().getBucketSet();
  if (bucketSet != null && !bucketSet.isEmpty()) {
    Object[] bucketIds = bucketSet.toArray();
    // One sampling attempt per known bucket; each attempt picks a bucket at random,
    // so the same bucket may be tried more than once.
    for (int attempt = 0; attempt < bucketIds.length; attempt++) {
      try {
        logger.debug("getSomeKeys: iteration: {}", attempt);
        int index = rnd.nextInt(bucketIds.length);
        if (index >= bucketIds.length) {
          // Defensive clamp kept from the original: some Random subclasses
          // (historically GSRandom) could return the bound itself.
          index = bucketIds.length - 1;
        }
        bucketId = (Integer) bucketIds[index];
        member = getNodeForBucketRead(bucketId);
        if (member == null) {
          // No member currently hosts this bucket; sample another one.
          continue;
        }
        logger.debug("getSomeKeys: iteration: {} for node {}", attempt, member);
        Set keys;
        if (member.equals(getMyId())) {
          // Bucket is hosted here: read the keys straight out of the local data store.
          keys = dataStore.handleRemoteGetKeys(bucketId, InterestType.REGULAR_EXPRESSION, ".*", false);
        } else {
          // Bucket is remote: message the hosting member and wait for its reply.
          FetchKeysResponse response = FetchKeysMessage.send(member, this, bucketId, false);
          keys = response.waitForKeys();
        }
        if (keys != null && !keys.isEmpty()) {
          return keys;
        }
      } catch (ForceReattemptException movinOn) {
        // Target moved or departed; verify the region is still usable, then retry.
        checkReadiness();
        logger.debug("Test hook getSomeKeys caught a ForceReattemptException for bucketId={}{}{}. Moving on to another bucket", getPRId(), BUCKET_ID_SEPARATOR, bucketId, movinOn);
      } catch (PRLocallyDestroyedException ignore) {
        // The bucket's region was destroyed locally on the target; retry elsewhere.
        logger.debug("getSomeKeys: Encountered PRLocallyDestroyedException");
        checkReadiness();
      }
    }
  }
  logger.debug("getSomeKeys: no keys found returning empty set");
  return Collections.emptySet();
}
Usage of org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException in the project geode by apache:
from the class PartitionedRegion, method getEntryInBucket.
/**
 * Fetch the entry snapshot for {@code key} from the bucket identified by {@code bucketId},
 * reading locally when this member hosts the bucket and remotely otherwise. Transient
 * failures (bucket movement, member departure, loss of primary) are retried up to
 * {@code calcRetry()} attempts before giving up and returning {@code null}.
 *
 * @param targetNode the member to read from first; may be re-selected on retry
 * @param bucketId the bucket expected to contain the key
 * @param key the key whose entry is requested
 * @param access whether the read is treated as an access (passed through to the local and
 *        remote entry reads; presumably affects eviction/expiration bookkeeping — confirm
 *        against getEntryLocally/getEntryRemotely)
 * @param allowTombstones whether destroyed (tombstone) entries may be returned
 * @return the entry snapshot, or {@code null} if the entry was not found or no member could
 *         serve the read within the retry budget
 */
protected EntrySnapshot getEntryInBucket(final DistributedMember targetNode, final int bucketId, final Object key, boolean access, final boolean allowTombstones) {
  final int retryAttempts = calcRetry();
  if (logger.isTraceEnabled()) {
    logger.trace("getEntryInBucket: " + "Key key={} ({}) from: {} bucketId={}", key, key.hashCode(), targetNode, bucketStringForLogs(bucketId));
  }
  Integer bucketIdInt = bucketId;
  EntrySnapshot ret = null;
  int count = 0;
  RetryTimeKeeper retryTime = null;
  InternalDistributedMember retryNode = (InternalDistributedMember) targetNode;
  while (count <= retryAttempts) {
    // Every continuation should check for DM cancellation
    if (retryNode == null) {
      // No node selected yet (or the last attempt invalidated it): pick one,
      // bounded by the retry-time budget so we never wait forever.
      checkReadiness();
      if (retryTime == null) {
        retryTime = new RetryTimeKeeper(this.retryTimeout);
      }
      if (retryTime.overMaximum()) {
        break;
      }
      retryNode = getOrCreateNodeForBucketRead(bucketId);
      // No storage found for bucket, early out preventing hot loop, bug 36819
      if (retryNode == null) {
        checkShutdown();
        return null;
      }
      continue;
    }
    try {
      // Read locally only when this member both stores data (localMaxMemory > 0)
      // and is the selected read node; otherwise go over the wire.
      final boolean loc = (this.localMaxMemory > 0) && retryNode.equals(getMyId());
      if (loc) {
        ret = this.dataStore.getEntryLocally(bucketId, key, access, allowTombstones);
      } else {
        ret = getEntryRemotely(retryNode, bucketIdInt, key, access, allowTombstones);
        // TODO:Suranjan&Yogesh : there should be better way than this one
        // Heuristic: a server-connection thread reading from a non-target member
        // indicates a network hop; record it for single-hop client metadata.
        String name = Thread.currentThread().getName();
        if (name.startsWith("ServerConnection") && !getMyId().equals(targetNode)) {
          setNetworkHopType(bucketIdInt, (InternalDistributedMember) targetNode);
        }
      }
      return ret;
    } catch (PRLocallyDestroyedException pde) {
      // The bucket's region was destroyed locally on the target; verify this
      // region is still usable, then fall through and retry on another node.
      if (logger.isDebugEnabled()) {
        logger.debug("getEntryInBucket: Encountered PRLocallyDestroyedException", pde);
      }
      checkReadiness();
    } catch (EntryNotFoundException ignore) {
      // A definitive miss is not retried — the entry simply does not exist.
      return null;
    } catch (ForceReattemptException prce) {
      prce.checkKey(key);
      if (logger.isDebugEnabled()) {
        logger.debug("getEntryInBucket: retrying, attempts so far: {}", count, prce);
      }
      checkReadiness();
      // If the bucket still resolves to the same node, pace the retry so we do
      // not spin against a member that keeps failing.
      InternalDistributedMember lastNode = retryNode;
      retryNode = getOrCreateNodeForBucketRead(bucketIdInt);
      if (lastNode.equals(retryNode)) {
        if (retryTime == null) {
          retryTime = new RetryTimeKeeper(this.retryTimeout);
        }
        if (retryTime.overMaximum()) {
          break;
        }
        retryTime.waitToRetryNode();
      }
    } catch (PrimaryBucketException notPrimary) {
      // The chosen node lost (or never had) the primary: inform the region
      // advisor and select a fresh read node for the next attempt.
      if (logger.isDebugEnabled()) {
        logger.debug("Bucket {} on Node {} not primary", notPrimary.getLocalizedMessage(), retryNode);
      }
      getRegionAdvisor().notPrimary(bucketIdInt, retryNode);
      retryNode = getOrCreateNodeForBucketRead(bucketIdInt);
    }
    // It's possible this is a GemFire thread e.g. ServerConnection
    // which got to this point because of a distributed system shutdown or
    // region closure which uses interrupt to break any sleep() or wait()
    // calls
    // e.g. waitForPrimary
    checkShutdown();
    count++;
    if (count == 1) {
      this.prStats.incContainsKeyValueOpsRetried();
    }
    this.prStats.incContainsKeyValueRetries();
  }
  // Fix for bug 36014
  // The exception is built only when debug logging is enabled (to capture a
  // stack trace); the warn below tolerates a null throwable argument.
  PartitionedRegionDistributionException e = null;
  if (logger.isDebugEnabled()) {
    e = new PartitionedRegionDistributionException(LocalizedStrings.PartitionRegion_NO_VM_AVAILABLE_FOR_GETENTRY_IN_0_ATTEMPTS.toLocalizedString(count));
  }
  logger.warn(LocalizedMessage.create(LocalizedStrings.PartitionRegion_NO_VM_AVAILABLE_FOR_GETENTRY_IN_0_ATTEMPTS, count), e);
  return null;
}
Usage of org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException in the project geode by apache:
from the class PartitionedRegion, method getBucketKeys.
/**
 * Fetch the keys for the given bucket identifier, if the bucket is local or remote. This version
 * of the method allows you to retrieve Tombstone entries as well as undestroyed entries.
 * Transient failures are retried up to {@code calcRetry()} attempts.
 *
 * @param bucketNum the identifier of the bucket whose keys are fetched
 * @param allowTombstones whether to include destroyed entries in the result
 * @return A set of keys from bucketNum or {@link Collections#EMPTY_SET} if no keys can be found.
 * @throws TimeoutException if, after a ForceReattemptException, the bucket keeps resolving to
 *         the same node and the retry-time budget is exhausted
 */
public Set getBucketKeys(int bucketNum, boolean allowTombstones) {
  Integer buck = bucketNum;
  final int retryAttempts = calcRetry();
  Set ret = null;
  int count = 0;
  InternalDistributedMember nod = getOrCreateNodeForBucketRead(bucketNum);
  RetryTimeKeeper snoozer = null;
  while (count <= retryAttempts) {
    // It's possible this is a GemFire thread e.g. ServerConnection
    // which got to this point because of a distributed system shutdown or
    // region closure which uses interrupt to break any sleep() or wait()
    // calls
    // e.g. waitForPrimary or waitForBucketRecovery
    checkShutdown();
    if (nod == null) {
      // No node selected for this bucket yet; try to pick one (timer started
      // lazily so fast successes never pay for it).
      if (snoozer == null) {
        snoozer = new RetryTimeKeeper(this.retryTimeout);
      }
      nod = getOrCreateNodeForBucketRead(bucketNum);
      // No storage found for bucket, early out preventing hot loop, bug 36819
      if (nod == null) {
        checkShutdown();
        break;
      }
      count++;
      continue;
    }
    try {
      if (nod.equals(getMyId())) {
        // Bucket is hosted here: read keys directly from the local data store.
        ret = this.dataStore.getKeysLocally(buck, allowTombstones);
      } else {
        // Bucket is remote: message the hosting member and wait for its reply.
        FetchKeysResponse r = FetchKeysMessage.send(nod, this, buck, allowTombstones);
        ret = r.waitForKeys();
      }
      if (ret != null) {
        // An empty (non-null) set is a valid answer and is returned as-is.
        return ret;
      }
    } catch (PRLocallyDestroyedException ignore) {
      // The bucket's region was destroyed locally on the target; verify this
      // region is still usable, then retry (a new node is chosen next pass).
      if (logger.isDebugEnabled()) {
        logger.debug("getBucketKeys: Encountered PRLocallyDestroyedException");
      }
      checkReadiness();
    } catch (ForceReattemptException prce) {
      if (logger.isDebugEnabled()) {
        logger.debug("getBucketKeys: attempt:{}", (count + 1), prce);
      }
      checkReadiness();
      if (snoozer == null) {
        snoozer = new RetryTimeKeeper(this.retryTimeout);
      }
      // If the bucket still resolves to the same node, pace the retry; give up
      // with a TimeoutException once the retry-time budget is spent.
      InternalDistributedMember oldNode = nod;
      nod = getNodeForBucketRead(buck);
      if (nod != null && nod.equals(oldNode)) {
        if (snoozer.overMaximum()) {
          checkReadiness();
          throw new TimeoutException(LocalizedStrings.PartitionedRegion_ATTEMPT_TO_ACQUIRE_PRIMARY_NODE_FOR_READ_ON_BUCKET_0_TIMED_OUT_IN_1_MS.toLocalizedString(new Object[] { getBucketName(buck), snoozer.getRetryTime() }));
        }
        snoozer.waitToRetryNode();
      }
    }
    count++;
  }
  if (logger.isDebugEnabled()) {
    logger.debug("getBucketKeys: no keys found returning empty set");
  }
  return Collections.emptySet();
}
Aggregations