use of org.apache.geode.CancelException in project geode by apache.
the class DistributedRegion method distributedRegionCleanup.
protected void distributedRegionCleanup(RegionEventImpl event) {
  if (event == null || event.getOperation() != Operation.REGION_REINITIALIZE) {
    // wake up any threads in waitForRequiredRoles... they will checkReadiness
    synchronized (this.missingRequiredRoles) {
      this.missingRequiredRoles.notifyAll();
    }
  }
  if (this.persistenceAdvisor != null) {
    // fix for bug 41094
    this.persistenceAdvisor.close();
  }
  this.distAdvisor.close();
  // Fix for bug 46338. Wait for in progress clears before destroying the
  // lock service, because destroying the service immediately releases the dlock
  waitForInProgressClear();
  DLockService dls = null;
  synchronized (this.dlockMonitor) {
    if (this.dlockService != null) {
      dls = (DLockService) this.dlockService;
    }
  }
  if (dls != null) {
    try {
      dls.destroyAndRemove();
    } catch (CancelException e) {
      // bug 37118
      if (logger.isDebugEnabled()) {
        logger.debug("DLS destroy abridged due to shutdown", e);
      }
    } catch (Exception ex) {
      logger.warn(LocalizedMessage.create(
          LocalizedStrings.DistributedRegion_DLS_DESTROY_MAY_HAVE_FAILED_FOR_0,
          this.getFullPath()), ex);
    }
  }
  // Fix for #48066 - make sure that region operations are completely
  // distributed to peers before destroying the region.
  long timeout =
      1000L * getCache().getInternalDistributedSystem().getConfig().getAckWaitThreshold();
  boolean flushOnClose =
      !Boolean.getBoolean(DistributionConfig.GEMFIRE_PREFIX + "no-flush-on-close"); // test hook
  if (!this.cache.forcedDisconnect() && flushOnClose
      && this.getDistributionManager().getMembershipManager() != null
      && this.getDistributionManager().getMembershipManager().isConnected()) {
    getDistributionAdvisor().forceNewMembershipVersion();
    try {
      getDistributionAdvisor().waitForCurrentOperations(timeout);
    } catch (Exception e) {
      // log this but try to close the region so that listeners are invoked
      logger.warn(LocalizedMessage.create(LocalizedStrings.GemFireCache_0_ERROR_CLOSING_REGION_1,
          new Object[] {this, getFullPath()}), e);
    }
  }
}
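A usage note on the test hook above: Boolean.getBoolean reads a JVM system property, and DistributionConfig.GEMFIRE_PREFIX resolves to "gemfire.", so the final flush before close can presumably be suppressed in a test along these lines (a sketch, not taken from the Geode test suite; the region variable is hypothetical):

// Test-only sketch: suppress the flush of in-flight operations before region close.
// The property name is assumed from DistributionConfig.GEMFIRE_PREFIX + "no-flush-on-close".
System.setProperty("gemfire.no-flush-on-close", "true");
try {
  region.close(); // hypothetical region under test
} finally {
  System.clearProperty("gemfire.no-flush-on-close");
}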
use of org.apache.geode.CancelException in project geode by apache.
the class PartitionedRegion method postRemoveAllSend.
@Override
public long postRemoveAllSend(DistributedRemoveAllOperation op,
    VersionedObjectList successfulOps) {
  final boolean isDebugEnabled = logger.isDebugEnabled();
  if (cache.isCacheAtShutdownAll()) {
    throw new CacheClosedException("Cache is shutting down");
  }
  final long startTime = PartitionedRegionStats.startTime();
  // build all the msgs by bucketid
  HashMap<Integer, RemoveAllPRMessage> prMsgMap = op.createPRMessages();
  PutAllPartialResult partialKeys = new PutAllPartialResult(op.removeAllDataSize);
  // clear the successfulOps list since we're actually doing the removes here
  // and the basicRemoveAll work was just a way to build the "op" object
  Map<Object, VersionTag> keyToVersionMap = new HashMap<Object, VersionTag>(successfulOps.size());
  successfulOps.clearVersions();
  Iterator<Map.Entry<Integer, RemoveAllPRMessage>> itor = prMsgMap.entrySet().iterator();
  while (itor.hasNext()) {
    Map.Entry<Integer, RemoveAllPRMessage> mapEntry = itor.next();
    Integer bucketId = (Integer) mapEntry.getKey();
    RemoveAllPRMessage prMsg = mapEntry.getValue();
    checkReadiness();
    long then = 0;
    if (isDebugEnabled) {
      then = System.currentTimeMillis();
    }
    try {
      VersionedObjectList versions = sendMsgByBucket(bucketId, prMsg);
      if (versions.size() > 0) {
        partialKeys.addKeysAndVersions(versions);
        versions.saveVersions(keyToVersionMap);
      } else if (!this.concurrencyChecksEnabled) {
        // no keys returned if not versioned
        Set keys = prMsg.getKeys();
        partialKeys.addKeys(keys);
      }
    } catch (PutAllPartialResultException pre) {
      // sendMsgByBucket applied partial keys
      if (isDebugEnabled) {
        logger.debug("PR.postRemoveAll encountered BulkOpPartialResultException, ", pre);
      }
      partialKeys.consolidate(pre.getResult());
    } catch (Exception ex) {
      // If failed at other exception
      if (isDebugEnabled) {
        logger.debug("PR.postRemoveAll encountered exception at sendMsgByBucket, ", ex);
      }
      @Released
      EntryEventImpl firstEvent = prMsg.getFirstEvent(this);
      try {
        partialKeys.saveFailedKey(firstEvent.getKey(), ex);
      } finally {
        firstEvent.release();
      }
    }
    if (isDebugEnabled) {
      long now = System.currentTimeMillis();
      if ((now - then) >= 10000) {
        logger.debug("PR.sendMsgByBucket took {} ms", (now - then));
      }
    }
  }
  this.prStats.endRemoveAll(startTime);
  if (!keyToVersionMap.isEmpty()) {
    for (Iterator it = successfulOps.getKeys().iterator(); it.hasNext();) {
      successfulOps.addVersion(keyToVersionMap.get(it.next()));
    }
    keyToVersionMap.clear();
  }
  if (partialKeys.hasFailure()) {
    logger.info(LocalizedMessage.create(LocalizedStrings.Region_PutAll_Applied_PartialKeys_0_1,
        new Object[] {getFullPath(), partialKeys}));
    if (op.isBridgeOperation()) {
      if (partialKeys.getFailure() instanceof CancelException) {
        throw (CancelException) partialKeys.getFailure();
      } else {
        throw new PutAllPartialResultException(partialKeys);
      }
    } else {
      if (partialKeys.getFailure() instanceof RuntimeException) {
        throw (RuntimeException) partialKeys.getFailure();
      } else {
        throw new RuntimeException(partialKeys.getFailure());
      }
    }
  }
  return -1;
}
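The tail of the method decides how the consolidated failure propagates: for client (bridge) operations a CancelException is rethrown unchanged so shutdown is reported as such, any other failure is wrapped in a PutAllPartialResultException carrying the per-key results, while peer-originated operations rethrow a RuntimeException directly. A condensed sketch of that decision, using a hypothetical helper name that is not part of Geode:

// Hypothetical helper condensing the failure-propagation branch above; illustrative only.
static void rethrowFailure(boolean isBridgeOperation, PutAllPartialResult partialKeys)
    throws PutAllPartialResultException {
  Throwable failure = partialKeys.getFailure();
  if (isBridgeOperation) {
    if (failure instanceof CancelException) {
      throw (CancelException) failure; // cache or distributed system is shutting down
    }
    throw new PutAllPartialResultException(partialKeys); // carries the per-key results
  }
  if (failure instanceof RuntimeException) {
    throw (RuntimeException) failure;
  }
  throw new RuntimeException(failure);
}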
use of org.apache.geode.CancelException in project geode by apache.
the class PRHARedundancyProvider method createBackupBucketOnMember.
/**
 * Creates the bucket with ID bucketId on the target member. This method will also create the
 * bucket for all of the child colocated PRs.
 *
 * @param bucketId the identifier of the bucket to create
 * @param targetNMember the member on which to create the backup bucket
 * @param isRebalance true if bucket creation is directed by rebalancing
 * @param replaceOfflineData true if the bucket may be created in place of data held by offline
 *        persistent members
 * @return true if the bucket was successfully created
 */
public boolean createBackupBucketOnMember(final int bucketId,
    final InternalDistributedMember targetNMember, final boolean isRebalance,
    boolean replaceOfflineData, InternalDistributedMember moveSource, boolean forceCreation) {
  if (logger.isDebugEnabled()) {
    logger.debug("createBackupBucketOnMember for bucketId={} member: {}",
        this.prRegion.bucketStringForLogs(bucketId), targetNMember);
  }
  if (!(targetNMember.equals(this.prRegion.getMyId()))) {
    // final StoppableReentrantReadWriteLock.StoppableReadLock isClosingReadLock;
    PartitionProfile pp = this.prRegion.getRegionAdvisor().getPartitionProfile(targetNMember);
    if (pp != null) {
      // isClosingReadLock = pp.getIsClosingReadLock(
      // this.prRegion.getCancelCriterion());
    } else {
      return false;
    }
    try {
      ManageBackupBucketMessage.NodeResponse response = ManageBackupBucketMessage.send(
          targetNMember, this.prRegion, bucketId, isRebalance, replaceOfflineData, moveSource,
          forceCreation);
      if (response.waitForAcceptance()) {
        if (logger.isDebugEnabled()) {
          logger.debug("createBackupBucketOnMember: Bucket creation succeed for bucketId={} on member = {}",
              this.prRegion.bucketStringForLogs(bucketId), targetNMember);
        }
        return true;
      } else {
        if (logger.isDebugEnabled()) {
          logger.debug("createBackupBucketOnMember: Bucket creation failed for bucketId={} on member = {}",
              this.prRegion.bucketStringForLogs(bucketId), targetNMember);
        }
        return false;
      }
    } catch (VirtualMachineError err) {
      SystemFailure.initiateFailure(err);
      // If this ever returns, rethrow the error. We're poisoned
      // now, so don't let this thread continue.
      throw err;
    } catch (Throwable e) {
      // Whenever you catch Error or Throwable, you must also
      // catch VirtualMachineError (see above). However, there is
      // _still_ a possibility that you are dealing with a cascading
      // error condition, so you also need to check to see if the JVM
      // is still usable:
      SystemFailure.checkFailure();
      if (e instanceof ForceReattemptException) {
        // no log needed see bug 37569
      } else if (e instanceof CancelException
          || (e.getCause() != null && (e.getCause() instanceof CancelException))) {
        // no need to log exceptions caused by cache closure
      } else {
        logger.warn(LocalizedMessage.create(
            LocalizedStrings.PRHARedundancyProvider_EXCEPTION_CREATING_PARTITION_ON__0,
            targetNMember), e);
      }
      return false;
    }
  } else {
    final PartitionedRegionDataStore prDS = this.prRegion.getDataStore();
    boolean bucketManaged = prDS != null && prDS.grabBucket(bucketId, moveSource, forceCreation,
        replaceOfflineData, isRebalance, null, false).equals(CreateBucketResult.CREATED);
    if (!bucketManaged) {
      if (logger.isDebugEnabled()) {
        logger.debug(
            "createBackupBucketOnMember: Local data store refused to accommodate the data for bucketId={} prDS={}",
            this.prRegion.bucketStringForLogs(bucketId), prDS);
      }
    }
    return bucketManaged;
  }
}
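The catch structure above (and the same shape in createBucketOnMember, shown next) follows the project's standard defensive idiom for Throwable handlers: VirtualMachineError is caught first and rethrown after SystemFailure.initiateFailure, any other Throwable triggers SystemFailure.checkFailure before being classified, and cancellation, whether thrown directly or wrapped as a cause, is treated as expected noise during cache closure. A stripped-down sketch of the idiom, with hypothetical doWork and logger names:

// Generic shape of the Throwable-handling idiom used above (names are illustrative).
try {
  doWork();
} catch (VirtualMachineError err) {
  SystemFailure.initiateFailure(err);
  // If this ever returns, rethrow the error. We're poisoned now,
  // so don't let this thread continue.
  throw err;
} catch (Throwable t) {
  // Having caught Throwable, check whether the JVM is in a cascading failure state.
  SystemFailure.checkFailure();
  if (t instanceof CancelException
      || (t.getCause() != null && t.getCause() instanceof CancelException)) {
    // cancellation during cache closure is expected; no logging needed
  } else {
    logger.warn("operation failed", t);
  }
}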
use of org.apache.geode.CancelException in project geode by apache.
the class PRHARedundancyProvider method createBucketOnMember.
/**
 * Creates the bucket with ID bucketId on the target member.
 *
 * @param bucketId the identifier of the bucket to create
 * @param targetNMember the member on which to create the bucket
 * @param newBucketSize the expected size of the new bucket
 * @param forceCreation inform the target member that it must attempt to host the bucket,
 *        ignoring its maximums as appropriate
 * @return a response object
 */
public ManageBucketRsp createBucketOnMember(final int bucketId,
    final InternalDistributedMember targetNMember, final int newBucketSize,
    boolean forceCreation) {
  if (logger.isDebugEnabled()) {
    logger.debug("createBucketOnMember for bucketId={} member: {}{}",
        this.prRegion.bucketStringForLogs(bucketId), targetNMember,
        (forceCreation ? " forced" : ""));
  }
  if (!(targetNMember.equals(this.prRegion.getMyId()))) {
    // final StoppableReentrantReadWriteLock.StoppableReadLock isClosingReadLock;
    PartitionProfile pp = this.prRegion.getRegionAdvisor().getPartitionProfile(targetNMember);
    if (pp != null) {
      // isClosingReadLock = pp.getIsClosingReadLock(
      // this.prRegion.getCancelCriterion());
    } else {
      return ManageBucketRsp.NO;
    }
    try {
      // isClosingReadLock.lock(); // Grab the read lock, preventing any region closures
      // on this remote Node until this bucket is fully published, forcing the closing
      // Node to recognize any pre-natal buckets.
      NodeResponse response = ManageBucketMessage.send(targetNMember, this.prRegion, bucketId,
          newBucketSize, forceCreation);
      if (response.waitForAcceptance()) {
        if (logger.isDebugEnabled()) {
          logger.debug("createBucketOnMember: Bucket creation succeed for bucketId={} on member = {}",
              this.prRegion.bucketStringForLogs(bucketId), targetNMember);
        }
        // lockList.add(isClosingReadLock);
        return ManageBucketRsp.YES;
      } else {
        if (logger.isDebugEnabled()) {
          logger.debug("createBucketOnMember: Bucket creation failed for bucketId={} on member = {}",
              this.prRegion.bucketStringForLogs(bucketId), targetNMember);
        }
        // isClosingReadLock.unlock();
        return response.rejectedDueToInitialization() ? ManageBucketRsp.NO_INITIALIZING
            : ManageBucketRsp.NO;
      }
    } catch (PartitionOfflineException e) {
      throw e;
    } catch (VirtualMachineError err) {
      SystemFailure.initiateFailure(err);
      // If this ever returns, rethrow the error. We're poisoned
      // now, so don't let this thread continue.
      throw err;
    } catch (Throwable e) {
      // Whenever you catch Error or Throwable, you must also
      // catch VirtualMachineError (see above). However, there is
      // _still_ a possibility that you are dealing with a cascading
      // error condition, so you also need to check to see if the JVM
      // is still usable:
      SystemFailure.checkFailure();
      if (e instanceof CancelException
          || (e.getCause() != null && (e.getCause() instanceof CancelException))) {
        // no need to log exceptions caused by cache closure
        return ManageBucketRsp.CLOSED;
      } else if (e instanceof ForceReattemptException) {
        // no log needed see bug 37569
      } else {
        logger.warn(LocalizedMessage.create(
            LocalizedStrings.PRHARedundancyProvider_EXCEPTION_CREATING_PARTITION_ON__0,
            targetNMember), e);
      }
      // isClosingReadLock.unlock();
      return ManageBucketRsp.NO;
    }
  } else {
    final PartitionedRegionDataStore prDS = this.prRegion.getDataStore();
    boolean bucketManaged = prDS != null && prDS.handleManageBucketRequest(bucketId,
        newBucketSize, this.prRegion.getMyId(), forceCreation);
    if (!bucketManaged) {
      if (logger.isDebugEnabled()) {
        logger.debug(
            "createBucketOnMember: Local data store not able to accommodate the data for bucketId={}",
            this.prRegion.bucketStringForLogs(bucketId));
      }
    }
    return ManageBucketRsp.valueOf(bucketManaged);
  }
}
use of org.apache.geode.CancelException in project geode by apache.
the class PartitionedRegion method sendDumpB2NRegionForBucket.
/**
 * Sends a message to all the {@code PartitionedRegion} participants, telling each member of the
 * PartitionedRegion to dump the nodelist in bucket2node metadata for the specified bucketId.
 */
public void sendDumpB2NRegionForBucket(int bucketId) {
  getRegionAdvisor().dumpProfiles("dumpB2NForBucket");
  try {
    PartitionResponse response =
        DumpB2NRegion.send(this.getRegionAdvisor().adviseAllPRNodes(), this, bucketId, false);
    response.waitForRepliesUninterruptibly();
    this.dumpB2NForBucket(bucketId);
  } catch (ReplyException re) {
    if (logger.isDebugEnabled()) {
      logger.debug("sendDumpB2NRegionForBucket got ReplyException", re);
    }
  } catch (CancelException e) {
    if (logger.isDebugEnabled()) {
      logger.debug("sendDumpB2NRegionForBucket got CacheClosedException", e);
    }
  } catch (RegionDestroyedException e) {
    if (logger.isDebugEnabled()) {
      logger.debug("sendDumpB2RegionForBucket got RegionDestroyedException", e);
    }
  }
}