Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.
Class HARegionQueue, method destroy.
/**
* destroys the underlying HARegion and removes its reference from the dispatched messages map
*/
public void destroy() throws CacheWriterException {
  this.destroyInProgress = true;
  Map tempDispatchedMessagesMap = dispatchedMessagesMap;
  if (tempDispatchedMessagesMap != null) {
    tempDispatchedMessagesMap.remove(this.regionName);
  }
  try {
    try {
      updateHAContainer();
    } catch (RegionDestroyedException ignore) {
      // keep going
    } catch (CancelException ignore) {
      // keep going
      if (logger.isDebugEnabled()) {
        logger.debug("HARegionQueue#destroy: ignored cancellation!!!!");
      }
    }
    try {
      this.region.destroyRegion();
    } catch (RegionDestroyedException ignore) {
      // keep going
    } catch (CancelException ignore) {
      // keep going
    }
    ((HAContainerWrapper) haContainer).removeProxy(regionName);
  } finally {
    this.stats.close();
  }
}
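The pattern worth noting here is the teardown idiom: RegionDestroyedException and CancelException are deliberately swallowed so cleanup can proceed even if the region is already gone or the cache is shutting down. A minimal sketch of the same defensive destroy in application code, assuming an already-obtained Region reference (RegionCleanup and destroyQuietly are hypothetical names, not Geode API):

import org.apache.geode.CancelException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionDestroyedException;

class RegionCleanup {
  // Destroy a region, tolerating the cases where another thread already
  // destroyed it or the cache is shutting down.
  static void destroyQuietly(Region<?, ?> region) {
    try {
      region.destroyRegion();
    } catch (RegionDestroyedException ignore) {
      // already gone; nothing left to clean up
    } catch (CancelException ignore) {
      // cache is closing; region teardown happens as part of shutdown
    }
  }
}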
Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.
Class BucketProfileUpdateMessage, method process.
@Override
protected void process(DistributionManager dm) {
  try {
    PartitionedRegion pr = PartitionedRegion.getPRFromId(this.prId);
    // pr.waitOnBucketInitialization(); // While PR doesn't directly do GII, wait on this for
    // bucket initialization -- mthomas 5/17/2007
    pr.getRegionAdvisor().putBucketProfile(this.bucketId, this.profile);
  } catch (PRLocallyDestroyedException fre) {
    if (logger.isDebugEnabled()) {
      logger.debug("<region locally destroyed> ///{}", this);
    }
  } catch (RegionDestroyedException e) {
    if (logger.isDebugEnabled()) {
      logger.debug("<region destroyed> ///{}", this);
    }
  } catch (CancelException e) {
    if (logger.isDebugEnabled()) {
      logger.debug("<cache closed> ///{}", this);
    }
  } catch (VirtualMachineError err) {
    SystemFailure.initiateFailure(err);
    // If this ever returns, rethrow the error. We're poisoned now,
    // so don't let this thread continue.
    throw err;
  } catch (Throwable ignore) {
    // Whenever you catch Error or Throwable, you must also
    // catch VirtualMachineError (see above). However, there is
    // _still_ a possibility that you are dealing with a cascading
    // error condition, so you also need to check to see if the JVM
    // is still usable:
    SystemFailure.checkFailure();
  } finally {
    if (this.processorId != 0) {
      ReplyMessage.send(getSender(), this.processorId, null, dm);
    }
  }
}
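The VirtualMachineError / Throwable pair above is Geode's standard guard for catching everything without masking a dying JVM. Isolated as a sketch (FailureGuard, runGuarded, and the Runnable argument are illustrative, not part of Geode):

import org.apache.geode.SystemFailure;

class FailureGuard {
  // Run a task, applying the SystemFailure idiom used throughout Geode:
  // record a VirtualMachineError and rethrow it; for any other Throwable,
  // first verify the JVM has not already been marked as failed.
  static void runGuarded(Runnable task) {
    try {
      task.run();
    } catch (VirtualMachineError err) {
      SystemFailure.initiateFailure(err);
      // The JVM is poisoned; rethrow so this thread stops immediately.
      throw err;
    } catch (Throwable t) {
      // A cascading failure may be in progress; this call rethrows the
      // original VirtualMachineError if one was recorded.
      SystemFailure.checkFailure();
      // otherwise the Throwable can be logged per the caller's policy
    }
  }
}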
Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.
Class PartitionedRegion, method getDataRegionForRead.
@Override
public LocalRegion getDataRegionForRead(final KeyInfo keyInfo) {
  final Object entryKey = keyInfo.getKey();
  BucketRegion br;
  try {
    PartitionedRegionDataStore ds = getDataStore();
    if (ds == null) {
      throw new TransactionException(
          LocalizedStrings.PartitionedRegion_TX_ON_DATASTORE.toLocalizedString());
    }
    // TODO provide appropriate Operation and arg
    int bucketId = keyInfo.getBucketId();
    if (bucketId == KeyInfo.UNKNOWN_BUCKET) {
      bucketId = PartitionedRegionHelper.getHashKey(this, null, entryKey, keyInfo.getValue(),
          keyInfo.getCallbackArg());
      keyInfo.setBucketId(bucketId);
    }
    br = ds.getInitializedBucketWithKnownPrimaryForId(null, bucketId);
    if (keyInfo.isCheckPrimary()) {
      try {
        br.checkForPrimary();
      } catch (PrimaryBucketException pbe) {
        throw new TransactionDataRebalancedException(
            LocalizedStrings.PartitionedRegion_TRANSACTIONAL_DATA_MOVED_DUE_TO_REBALANCING
                .toLocalizedString(),
            pbe);
      }
    }
  } catch (RegionDestroyedException ignore) {
    // TODO: why is this purposely not wrapping the original cause?
    throw new TransactionDataNotColocatedException(
        LocalizedStrings.PartitionedRegion_KEY_0_NOT_COLOCATED_WITH_TRANSACTION
            .toLocalizedString(entryKey));
  } catch (ForceReattemptException ignore) {
    br = null;
  }
  return br;
}
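The effect is that transactional reads never see a raw RegionDestroyedException; it is translated into a TransactionDataNotColocatedException. A hedged sketch of what that means for calling code (TxRead and readInTx are illustrative names; the transaction manager calls are standard Geode API):

import org.apache.geode.cache.CacheTransactionManager;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.TransactionDataNotColocatedException;

class TxRead {
  // A transactional read: if the key's bucket was destroyed or moved,
  // the partitioned region surfaces a transaction-level exception rather
  // than the underlying RegionDestroyedException.
  static <K, V> V readInTx(CacheTransactionManager txMgr, Region<K, V> region, K key) {
    txMgr.begin();
    try {
      V value = region.get(key);
      txMgr.commit();
      return value;
    } catch (TransactionDataNotColocatedException e) {
      txMgr.rollback();
      throw e; // caller decides whether to retry outside a transaction
    }
  }
}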
Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.
Class PartitionedRegion, method getFromBucket.
/**
* @param requestingClient the client requesting the object, or null if not from a client
* @param allowRetry if false then do not retry
*/
private Object getFromBucket(final InternalDistributedMember targetNode, int bucketId,
    final Object key, final Object aCallbackArgument, boolean disableCopyOnRead,
    boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent,
    boolean returnTombstones, boolean allowRetry) {
  final boolean isDebugEnabled = logger.isDebugEnabled();
  final int retryAttempts = calcRetry();
  Object obj;
  // retry the get remotely until it finds the right node managing the bucket
  int count = 0;
  RetryTimeKeeper retryTime = null;
  InternalDistributedMember retryNode = targetNode;
  while (count <= retryAttempts) {
    // Every continuation should check for DM cancellation
    if (retryNode == null) {
      checkReadiness();
      if (retryTime == null) {
        retryTime = new RetryTimeKeeper(this.retryTimeout);
      }
      retryNode = getNodeForBucketReadOrLoad(bucketId);
      // No storage found for bucket, early out preventing hot loop, bug 36819
      if (retryNode == null) {
        checkShutdown();
        return null;
      }
      continue;
    }
    final boolean isLocal = this.localMaxMemory > 0 && retryNode.equals(getMyId());
    try {
      if (isLocal) {
        obj = this.dataStore.getLocally(bucketId, key, aCallbackArgument, disableCopyOnRead,
            preferCD, requestingClient, clientEvent, returnTombstones, false);
      } else {
        if (this.haveCacheLoader) {
          /* MergeGemXDHDFSToGFE - reading from local bucket was disabled in GemXD */
          if (null != (obj = getFromLocalBucket(bucketId, key, aCallbackArgument,
              disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones))) {
            return obj;
          }
        }
        obj = getRemotely(retryNode, bucketId, key, aCallbackArgument, preferCD,
            requestingClient, clientEvent, returnTombstones);
        // TODO: there should be a better way than this one
        String name = Thread.currentThread().getName();
        if (name.startsWith("ServerConnection") && !getMyId().equals(retryNode)) {
          setNetworkHopType(bucketId, (InternalDistributedMember) retryNode);
        }
      }
      return obj;
    } catch (PRLocallyDestroyedException pde) {
      if (isDebugEnabled) {
        logger.debug("getFromBucket Encountered PRLocallyDestroyedException", pde);
      }
      checkReadiness();
      if (allowRetry) {
        retryNode = getNodeForBucketReadOrLoad(bucketId);
      } else {
        // Only transactions set allowRetry to false,
        // so fail the transaction here, as the region is destroyed.
        Throwable cause = pde.getCause();
        if (cause instanceof RegionDestroyedException) {
          throw (RegionDestroyedException) cause;
        } else {
          // set the cause to RegionDestroyedException.
          throw new RegionDestroyedException(toString(), getFullPath());
        }
      }
    } catch (ForceReattemptException prce) {
      prce.checkKey(key);
      checkReadiness();
      if (allowRetry) {
        InternalDistributedMember lastNode = retryNode;
        if (isDebugEnabled) {
          logger.debug("getFromBucket: retry attempt: {} of {}", count, retryAttempts, prce);
        }
        retryNode = getNodeForBucketReadOrLoad(bucketId);
        if (lastNode.equals(retryNode)) {
          if (retryTime == null) {
            retryTime = new RetryTimeKeeper(this.retryTimeout);
          }
          if (retryTime.overMaximum()) {
            break;
          }
          if (isDebugEnabled) {
            logger.debug("waiting to retry node {}", retryNode);
          }
          retryTime.waitToRetryNode();
        }
      } else {
        // within a transaction
        if (prce instanceof BucketNotFoundException) {
          throw new TransactionDataRebalancedException(
              LocalizedStrings.PartitionedRegion_TRANSACTIONAL_DATA_MOVED_DUE_TO_REBALANCING
                  .toLocalizedString(key),
              prce);
        }
        Throwable cause = prce.getCause();
        if (cause instanceof PrimaryBucketException) {
          throw (PrimaryBucketException) cause;
        } else if (cause instanceof TransactionDataRebalancedException) {
          throw (TransactionDataRebalancedException) cause;
        } else if (cause instanceof RegionDestroyedException) {
          throw new TransactionDataRebalancedException(
              LocalizedStrings.PartitionedRegion_TRANSACTIONAL_DATA_MOVED_DUE_TO_REBALANCING
                  .toLocalizedString(key),
              cause);
        } else {
          // Should not be seen currently; added as protection against future changes.
          throw new TransactionException("Failed to get key: " + key, prce);
        }
      }
    } catch (PrimaryBucketException notPrimary) {
      if (allowRetry) {
        if (isDebugEnabled) {
          logger.debug("getFromBucket: {} on Node {} not primary",
              notPrimary.getLocalizedMessage(), retryNode);
        }
        getRegionAdvisor().notPrimary(bucketId, retryNode);
        retryNode = getNodeForBucketReadOrLoad(bucketId);
      } else {
        throw notPrimary;
      }
    }
    // It's possible this is a GemFire thread, e.g. ServerConnection,
    // which got to this point because of a distributed system shutdown or
    // region closure which uses interrupt to break any sleep() or wait()
    // calls, e.g. waitForPrimary
    checkShutdown();
    count++;
    if (count == 1) {
      this.prStats.incGetOpsRetried();
    }
    this.prStats.incGetRetries();
    if (isDebugEnabled) {
      logger.debug("getFromBucket: Attempting to resend get to node {} after {} failed attempts",
          retryNode, count);
    }
  }
  // end while
  // Fix for bug 36014
  PartitionedRegionDistributionException e = null;
  if (logger.isDebugEnabled()) {
    e = new PartitionedRegionDistributionException(
        LocalizedStrings.PartitionRegion_NO_VM_AVAILABLE_FOR_GET_IN_0_ATTEMPTS
            .toLocalizedString(count));
  }
  logger.warn(LocalizedMessage.create(
      LocalizedStrings.PartitionRegion_NO_VM_AVAILABLE_FOR_GET_IN_0_ATTEMPTS, count), e);
  return null;
}
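Stripped of Geode internals, getFromBucket is a bounded retry loop: choose a node, attempt the read, and on a recoverable failure re-route to a fresh node until the attempt budget runs out. A schematic of that control flow (every name here is an illustrative placeholder, not Geode API):

class RetrySketch {
  static class RecoverableException extends RuntimeException {}

  Object node; // current candidate node; null means pick one

  Object chooseNodeForBucket() { return null; }                       // placeholder: route to a member
  Object fetchFrom(Object target) { throw new RecoverableException(); } // placeholder: attempt the read

  Object getWithRetry(int retryAttempts) {
    int count = 0;
    while (count <= retryAttempts) {
      if (node == null) {
        node = chooseNodeForBucket();
        if (node == null) {
          return null;                 // no storage for the bucket: give up early
        }
        continue;                      // re-enter the loop with a node in hand
      }
      try {
        return fetchFrom(node);        // success exits the loop
      } catch (RecoverableException e) {
        node = chooseNodeForBucket();  // re-route and try again
      }
      count++;
    }
    return null;                       // attempt budget exhausted
  }
}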
Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.
Class PartitionedRegion, method virtualPut.
@Override
boolean virtualPut(EntryEventImpl event, boolean ifNew, boolean ifOld, Object expectedOldValue,
    boolean requireOldValue, long lastModified, boolean overwriteDestroyed)
    throws TimeoutException, CacheWriterException {
  final long startTime = PartitionedRegionStats.startTime();
  boolean result = false;
  final DistributedPutAllOperation putAllOp_save = event.setPutAllOperation(null);
  if (event.getEventId() == null) {
    event.setNewEventId(this.cache.getDistributedSystem());
  }
  boolean bucketStorageAssigned = true;
  try {
    final Integer bucketId = event.getKeyInfo().getBucketId();
    assert bucketId != KeyInfo.UNKNOWN_BUCKET;
    // check in bucket2Node region
    InternalDistributedMember targetNode = getNodeForBucketWrite(bucketId, null);
    // and to optimize distribution.
    if (logger.isDebugEnabled()) {
      logger.debug("PR.virtualPut putting event={}", event);
    }
    if (targetNode == null) {
      try {
        bucketStorageAssigned = false;
        targetNode = createBucket(bucketId, event.getNewValSizeForPR(), null);
      } catch (PartitionedRegionStorageException e) {
        // try not to throw a PRSE if the cache is closing or this region was
        // destroyed during createBucket() (bug 36574)
        this.checkReadiness();
        if (this.cache.isClosed()) {
          throw new RegionDestroyedException(toString(), getFullPath());
        }
        throw e;
      }
    }
    if (event.isBridgeEvent() && bucketStorageAssigned) {
      setNetworkHopType(bucketId, targetNode);
    }
    if (putAllOp_save == null) {
      result = putInBucket(targetNode, bucketId, event, ifNew, ifOld, expectedOldValue,
          requireOldValue, (ifNew ? 0L : lastModified));
      if (logger.isDebugEnabled()) {
        logger.debug("PR.virtualPut event={} ifNew={} ifOld={} result={}", event, ifNew, ifOld,
            result);
      }
    } else {
      // fix for 40502
      checkIfAboveThreshold(event);
      // putAll: save the bucket id into the DPAO, then wait for postPutAll to send the message.
      // At this time the DPAO's PutAllEntryData should be empty, so add the entry here with its
      // bucket id. The message is packed in postPutAll, including the one to the local bucket,
      // because the buckets could change by then.
      putAllOp_save.addEntry(event, bucketId);
      if (logger.isDebugEnabled()) {
        logger.debug("PR.virtualPut PutAll added event={} into bucket {}", event, bucketId);
      }
      result = true;
    }
  } catch (RegionDestroyedException rde) {
    if (!rde.getRegionFullPath().equals(getFullPath())) {
      throw new RegionDestroyedException(toString(), getFullPath(), rde);
    }
  } finally {
    if (putAllOp_save == null) {
      // only for a normal put
      if (ifNew) {
        this.prStats.endCreate(startTime);
      } else {
        this.prStats.endPut(startTime);
      }
    }
  }
  if (!result) {
    checkReadiness();
    if (!ifNew && !ifOld && !this.concurrencyChecksEnabled) {
      // may fail due to concurrency conflict
      // failed for unknown reason
      // throw new PartitionedRegionStorageException("unable to execute operation");
      logger.warn(
          LocalizedMessage.create(
              LocalizedStrings.PartitionedRegion_PRVIRTUALPUT_RETURNING_FALSE_WHEN_IFNEW_AND_IFOLD_ARE_BOTH_FALSE),
          new Exception(LocalizedStrings.PartitionedRegion_STACK_TRACE.toLocalizedString()));
    }
  }
  return result;
}
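The RegionDestroyedException handling near the end is a re-attribution pattern: if the destroyed region is an internal bucket rather than this partitioned region, the exception is rewrapped so the caller sees this region's path, with the original as the cause. In isolation (RethrowSketch, rethrowForRegion, and its parameters are illustrative; the constructor and getRegionFullPath() appear in the snippet above):

import org.apache.geode.cache.RegionDestroyedException;

class RethrowSketch {
  // Re-attribute a RegionDestroyedException thrown by an internal (bucket)
  // region to the user-visible region, keeping the original as the cause.
  static void rethrowForRegion(RegionDestroyedException rde, String regionDescription,
      String fullPath) {
    if (!rde.getRegionFullPath().equals(fullPath)) {
      throw new RegionDestroyedException(regionDescription, fullPath, rde);
    }
    // otherwise this region itself was destroyed; the caller's policy applies
  }
}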