Use of org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException in project geode by Apache.
The class PartitionedRegion, method containsKeyInBucket:
boolean containsKeyInBucket(final InternalDistributedMember targetNode, final Integer bucketIdInt,
    final Object key, boolean valueCheck) {
  final int retryAttempts = calcRetry();
  if (logger.isDebugEnabled()) {
    logger.debug("containsKeyInBucket: {}{} ({}) from: {} bucketId={}",
        (valueCheck ? "ValueForKey key=" : "Key key="), key, key.hashCode(), targetNode,
        bucketStringForLogs(bucketIdInt));
  }
  boolean ret;
  int count = 0;
  RetryTimeKeeper retryTime = null;
  InternalDistributedMember retryNode = targetNode;
  while (count <= retryAttempts) {
    // Every continuation should check for DM cancellation
    if (retryNode == null) {
      checkReadiness();
      if (retryTime == null) {
        retryTime = new RetryTimeKeeper(this.retryTimeout);
      }
      if (retryTime.overMaximum()) {
        break;
      }
      retryNode = getOrCreateNodeForBucketRead(bucketIdInt);
      // No storage found for bucket, early out preventing hot loop, bug 36819
      if (retryNode == null) {
        // Prefer closed style exceptions over empty result
        checkShutdown();
        return false;
      }
      continue;
    }
    // retryNode != null
    try {
      final boolean loc = retryNode.equals(getMyId());
      if (loc) {
        if (valueCheck) {
          ret = this.dataStore.containsValueForKeyLocally(bucketIdInt, key);
        } else {
          ret = this.dataStore.containsKeyLocally(bucketIdInt, key);
        }
      } else {
        if (valueCheck) {
          ret = containsValueForKeyRemotely(retryNode, bucketIdInt, key);
        } else {
          ret = containsKeyRemotely(retryNode, bucketIdInt, key);
        }
      }
      return ret;
    } catch (PRLocallyDestroyedException pde) {
      if (logger.isDebugEnabled()) {
        logger.debug("containsKeyInBucket: Encountered PRLocallyDestroyedException", pde);
      }
      checkReadiness();
    } catch (ForceReattemptException prce) {
      prce.checkKey(key);
      if (logger.isDebugEnabled()) {
        logger.debug("containsKeyInBucket: retry attempt:{} of {}", count, retryAttempts, prce);
      }
      checkReadiness();
      InternalDistributedMember lastNode = retryNode;
      retryNode = getOrCreateNodeForBucketRead(bucketIdInt);
      if (lastNode.equals(retryNode)) {
        if (retryTime == null) {
          retryTime = new RetryTimeKeeper(this.retryTimeout);
        }
        if (retryTime.overMaximum()) {
          break;
        }
        retryTime.waitToRetryNode();
      }
    } catch (PrimaryBucketException notPrimary) {
      if (logger.isDebugEnabled()) {
        logger.debug("containsKeyInBucket {} on Node {} not primary",
            notPrimary.getLocalizedMessage(), retryNode);
      }
      getRegionAdvisor().notPrimary(bucketIdInt, retryNode);
      retryNode = getOrCreateNodeForBucketRead(bucketIdInt);
    } catch (RegionDestroyedException rde) {
      if (!rde.getRegionFullPath().equals(getFullPath())) {
        throw new RegionDestroyedException(toString(), getFullPath(), rde);
      }
    }
    // It's possible this is a GemFire thread e.g. ServerConnection
    // which got to this point because of a distributed system shutdown or
    // region closure which uses interrupt to break any sleep() or wait()
    // calls, e.g. waitForPrimary
    checkShutdown();
    count++;
    if (count == 1) {
      this.prStats.incContainsKeyValueOpsRetried();
    }
    this.prStats.incContainsKeyValueRetries();
  }
  StringId msg = null;
  if (valueCheck) {
    msg = LocalizedStrings.PartitionedRegion_NO_VM_AVAILABLE_FOR_CONTAINS_VALUE_FOR_KEY_IN_1_ATTEMPTS;
  } else {
    msg = LocalizedStrings.PartitionedRegion_NO_VM_AVAILABLE_FOR_CONTAINS_KEY_IN_1_ATTEMPTS;
  }
  Integer countInteger = count;
  // Fix for bug 36014
  PartitionedRegionDistributionException e = null;
  if (logger.isDebugEnabled()) {
    e = new PartitionedRegionDistributionException(msg.toLocalizedString(countInteger));
  }
  logger.warn(LocalizedMessage.create(msg, countInteger), e);
  return false;
}
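The method above shows the retry shape PartitionedRegion uses for bucket reads: re-resolve the target member after a recoverable failure, bound the number of attempts, and back off when the same member is selected again. The following is a minimal, self-contained sketch of that shape, not Geode code; NodeResolver, BucketReader, and BucketReadFailedException are hypothetical stand-ins.

public class RetryReadSketch {
  // Hypothetical stand-ins for "find a member hosting the bucket" and "read from it".
  interface NodeResolver {
    String resolveNode(int bucketId);
  }

  interface BucketReader {
    boolean readFromNode(String node, int bucketId, Object key) throws BucketReadFailedException;
  }

  static class BucketReadFailedException extends Exception {
  }

  static boolean containsKeyWithRetry(NodeResolver resolver, BucketReader reader, int bucketId,
      Object key, int retryAttempts, long maxWaitMillis) throws InterruptedException {
    long deadline = System.currentTimeMillis() + maxWaitMillis;
    String node = resolver.resolveNode(bucketId);
    for (int count = 0; count <= retryAttempts; count++) {
      if (node == null) {
        if (System.currentTimeMillis() > deadline) {
          break; // no member hosted the bucket within the time limit
        }
        node = resolver.resolveNode(bucketId);
        continue;
      }
      try {
        return reader.readFromNode(node, bucketId, key);
      } catch (BucketReadFailedException e) {
        String lastNode = node;
        node = resolver.resolveNode(bucketId); // the bucket may have moved to another member
        if (lastNode.equals(node)) {
          Thread.sleep(50); // same member chosen again: brief pause before the next attempt
        }
      }
    }
    return false; // attempts exhausted, analogous to the "no VM available" warning above
  }
}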
Use of org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException in project geode by Apache.
The class PartitionedRegion, method fetchAllLocalKeys:
public Set fetchAllLocalKeys(Integer id, Set<Integer> failures, String regex) {
  Set result = new HashSet();
  try {
    Set keys = null;
    if (regex != null) {
      keys = this.dataStore.handleRemoteGetKeys(id, InterestType.REGULAR_EXPRESSION, regex, true);
    } else {
      keys = this.dataStore.getKeysLocally(id, true);
    }
    result.addAll(keys);
  } catch (ForceReattemptException ignore) {
    failures.add(id);
  } catch (PRLocallyDestroyedException ignore) {
    failures.add(id);
  }
  return result;
}
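fetchAllLocalKeys reports buckets it could not read through the caller-supplied failures set instead of throwing, so the caller can retry only those buckets. A hedged caller-side sketch of that contract follows; fetchKeysForBucket is a hypothetical stand-in for a per-bucket fetch such as the method above, not a Geode API.

import java.util.HashSet;
import java.util.Set;
import java.util.function.BiFunction;

public class FetchWithFailuresSketch {
  // fetchKeysForBucket takes a bucket id and a failures set, returns the keys it could read,
  // and records the bucket id in the failures set when the read did not succeed.
  static Set<Object> fetchAll(Set<Integer> bucketIds,
      BiFunction<Integer, Set<Integer>, Set<Object>> fetchKeysForBucket) {
    Set<Object> allKeys = new HashSet<>();
    Set<Integer> failures = new HashSet<>();
    for (Integer bucketId : bucketIds) {
      allKeys.addAll(fetchKeysForBucket.apply(bucketId, failures));
    }
    // One more pass over the buckets that failed; a real caller would pick a different
    // member or give up after a bounded number of rounds.
    Set<Integer> secondRoundFailures = new HashSet<>();
    for (Integer bucketId : failures) {
      allKeys.addAll(fetchKeysForBucket.apply(bucketId, secondRoundFailures));
    }
    return allKeys;
  }
}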
Use of org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException in project geode by Apache.
The class ColocationHelper, method getColocatedRegion:
/**
* A utility method to retrieve the colocated region of a given partitioned region
*
* @return colocated PartitionedRegion
* @throws IllegalStateException for missing colocated region
* @since GemFire 5.8Beta
*/
public static PartitionedRegion getColocatedRegion(final PartitionedRegion partitionedRegion) {
  // precondition1
  Assert.assertTrue(partitionedRegion != null);
  String colocatedWith = partitionedRegion.getPartitionAttributes().getColocatedWith();
  if (colocatedWith == null) {
    // the region is not colocated with any region
    return null;
  }
  Region prRoot = PartitionedRegionHelper.getPRRoot(partitionedRegion.getCache());
  PartitionRegionConfig prConf =
      (PartitionRegionConfig) prRoot.get(getRegionIdentifier(colocatedWith));
  if (prConf == null) {
    throw new IllegalStateException(
        LocalizedStrings.ColocationHelper_REGION_SPECIFIED_IN_COLOCATEDWITH_DOES_NOT_EXIST
            .toLocalizedString(new Object[] { colocatedWith, partitionedRegion.getFullPath() }));
  }
  int prID = prConf.getPRId();
  PartitionedRegion colocatedPR = null;
  try {
    colocatedPR = PartitionedRegion.getPRFromId(prID);
    if (colocatedPR != null) {
      colocatedPR.waitOnBucketMetadataInitialization();
    } else {
      throw new IllegalStateException(
          LocalizedStrings.ColocationHelper_REGION_SPECIFIED_IN_COLOCATEDWITH_DOES_NOT_EXIST
              .toLocalizedString(new Object[] { colocatedWith, partitionedRegion.getFullPath() }));
    }
  } catch (PRLocallyDestroyedException e) {
    if (logger.isDebugEnabled()) {
      logger.debug("PRLocallyDestroyedException : Region with prId={} is locally destroyed on this node",
          prID, e);
    }
  }
  return colocatedPR;
}
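getColocatedRegion only returns a region when colocated-with was configured on the partition attributes of the argument region. As a reminder of how that configuration looks on the public API, here is a small sketch assuming a running member; the region names "customers" and "orders" are made up for illustration.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class ColocationConfigSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();

    Region<String, Object> customers =
        cache.<String, Object>createRegionFactory(RegionShortcut.PARTITION).create("customers");

    // "orders" is colocated with "customers": entries routed by the same key live on the
    // same members, and getColocatedWith() on the orders region's partition attributes
    // now returns "customers", which is what the helper above looks up.
    Region<String, Object> orders = cache.<String, Object>createRegionFactory(RegionShortcut.PARTITION)
        .setPartitionAttributes(
            new PartitionAttributesFactory<String, Object>().setColocatedWith("customers").create())
        .create("orders");

    cache.close();
  }
}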
Use of org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException in project geode by Apache.
The class PartitionedRegion, method _getKeysWithInterest:
/**
Finds all the keys matching the given interest type and passes them to the given collector.
*
* @param allowTombstones whether to return destroyed entries
*/
private void _getKeysWithInterest(int interestType, Object interestArg, boolean allowTombstones,
    SetCollector collector) throws IOException {
  // this could be parallelized by building up a list of buckets for each
  // vm and sending out the requests for keys in parallel. That might dump
  // more onto this vm in one swoop than it could handle, though, so we're
  // keeping it simple for now
  int totalBuckets = getTotalNumberOfBuckets();
  int retryAttempts = calcRetry();
  for (int bucket = 0; bucket < totalBuckets; bucket++) {
    Set bucketSet = null;
    Integer lbucket = bucket;
    final RetryTimeKeeper retryTime = new RetryTimeKeeper(Integer.MAX_VALUE);
    InternalDistributedMember bucketNode = getOrCreateNodeForBucketRead(lbucket);
    for (int count = 0; count <= retryAttempts; count++) {
      if (logger.isDebugEnabled()) {
        logger.debug("_getKeysWithInterest bucketId={} attempt={}", bucket, (count + 1));
      }
      try {
        if (bucketNode != null) {
          if (bucketNode.equals(getMyId())) {
            bucketSet = this.dataStore.handleRemoteGetKeys(lbucket, interestType, interestArg,
                allowTombstones);
          } else {
            FetchKeysResponse r = FetchKeysMessage.sendInterestQuery(bucketNode, this, lbucket,
                interestType, interestArg, allowTombstones);
            bucketSet = r.waitForKeys();
          }
        }
        break;
      } catch (PRLocallyDestroyedException ignore) {
        if (logger.isDebugEnabled()) {
          logger.debug("_getKeysWithInterest: Encountered PRLocallyDestroyedException");
        }
        checkReadiness();
      } catch (ForceReattemptException prce) {
        // no checkKey possible
        if (logger.isDebugEnabled()) {
          logger.debug("_getKeysWithInterest: retry attempt: {}", count, prce);
        }
        checkReadiness();
        InternalDistributedMember lastTarget = bucketNode;
        bucketNode = getOrCreateNodeForBucketRead(lbucket);
        if (lastTarget != null && lastTarget.equals(bucketNode)) {
          if (retryTime.overMaximum()) {
            break;
          }
          retryTime.waitToRetryNode();
        }
      }
    } // for(count)
    if (bucketSet != null) {
      collector.receiveSet(bucketSet);
    }
  } // for(bucket)
}
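_getKeysWithInterest hands each bucket's keys to a SetCollector as soon as they arrive rather than building one large set itself. The sketch below is a hypothetical stand-in for such a collector: SimpleSetCollector mirrors only the receiveSet(Set) callback visible above and is not the internal SetCollector interface.

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

public class KeyCollectorSketch {
  // Hypothetical callback shape: one receiveSet call per bucket that returned keys.
  interface SimpleSetCollector {
    void receiveSet(Set<?> keys) throws IOException;
  }

  // Simple implementation that merges every per-bucket set into a single result set.
  static class AccumulatingCollector implements SimpleSetCollector {
    private final Set<Object> allKeys = new HashSet<>();

    @Override
    public void receiveSet(Set<?> keys) {
      allKeys.addAll(keys);
    }

    Set<Object> keys() {
      return allKeys;
    }
  }
}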
Use of org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException in project geode by Apache.
The class PartitionedRegion, method validatePRID:
/**
* Verify that the given prId is correct for the given region name in this vm
*
* @param sender the member requesting validation
* @param prId the ID being used for the pr by the sender
* @param regionId the regionIdentifier used for prId by the sender
*/
public static void validatePRID(InternalDistributedMember sender, int prId, String regionId) {
  try {
    PartitionedRegion pr = null;
    synchronized (prIdToPR) {
      // first do a quick probe
      pr = (PartitionedRegion) prIdToPR.getRegion(prId);
    }
    if (pr != null && !pr.isLocallyDestroyed && pr.getRegionIdentifier().equals(regionId)) {
      return;
    }
  } catch (RegionDestroyedException ignore) {
    // ignore and do full pass over prid map
  } catch (PartitionedRegionException ignore) {
    // ditto
  } catch (PRLocallyDestroyedException ignore) {
    // ignore and do full check
  }
  synchronized (prIdToPR) {
    for (Iterator it = prIdToPR.values().iterator(); it.hasNext();) {
      Object o = it.next();
      if (o instanceof String) {
        continue;
      }
      PartitionedRegion pr = (PartitionedRegion) o;
      if (pr.getPRId() == prId) {
        if (!pr.getRegionIdentifier().equals(regionId)) {
          logger.warn(LocalizedMessage.create(
              LocalizedStrings.PartitionedRegion_0_IS_USING_PRID_1_FOR_2_BUT_THIS_PROCESS_MAPS_THAT_PRID_TO_3,
              new Object[] { sender.toString(), prId, pr.getRegionIdentifier() }));
        }
      } else if (pr.getRegionIdentifier().equals(regionId)) {
        logger.warn(LocalizedMessage.create(
            LocalizedStrings.PartitionedRegion_0_IS_USING_PRID_1_FOR_2_BUT_THIS_PROCESS_IS_USING_PRID_3,
            new Object[] { sender, prId, pr.getRegionIdentifier(), pr.getPRId() }));
      }
    }
  }
}
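validatePRID uses a two-step shape: an optimistic probe of the id-to-region map and, only when that probe is inconclusive, a full scan under the same lock that logs rather than throws on a mismatch. Below is a minimal sketch of that shape using plain Java collections; the map, ids, and messages are hypothetical and simplified.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.logging.Logger;

public class IdValidationSketch {
  private static final Logger logger = Logger.getLogger("IdValidationSketch");
  private final Map<Integer, String> idToRegion = new ConcurrentHashMap<>();

  void validate(String sender, int id, String regionId) {
    // Quick probe: if the id already maps to the expected region, we are done.
    String probed = idToRegion.get(id);
    if (regionId.equals(probed)) {
      return;
    }
    // Full pass: report conflicts but do not throw, mirroring validatePRID above.
    synchronized (idToRegion) {
      for (Map.Entry<Integer, String> entry : idToRegion.entrySet()) {
        if (entry.getKey() == id && !entry.getValue().equals(regionId)) {
          logger.warning(sender + " is using id " + id + " for " + regionId
              + " but this process maps that id to " + entry.getValue());
        } else if (entry.getValue().equals(regionId) && entry.getKey() != id) {
          logger.warning(sender + " is using id " + id + " for " + regionId
              + " but this process is using id " + entry.getKey());
        }
      }
    }
  }
}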