Usage of org.apache.geode.cache.RegionDestroyedException in the Apache Geode project:
class DiskStoreImpl, method handleFullAsyncQueue.
/**
 * Flushes a single {@link AsyncDiskEntry} synchronously because the async write
 * queue has filled up. Version-only entries flush just their version tag; all
 * other entries are handed to the full-queue helper.
 */
private void handleFullAsyncQueue(Object o) {
  AsyncDiskEntry asyncEntry = (AsyncDiskEntry) o;
  LocalRegion targetRegion = asyncEntry.region;
  VersionTag versionTag = asyncEntry.tag;
  try {
    if (asyncEntry.versionOnly) {
      // Only version information needs to reach disk for this entry.
      DiskEntry.Helper.doAsyncFlush(versionTag, targetRegion);
    } else {
      DiskEntry.Helper.handleFullAsyncQueue(asyncEntry.de, targetRegion, versionTag);
    }
  } catch (RegionDestroyedException ignore) {
    // Normally a region is flushed before it is closed or destroyed, but in
    // some cases it is closed without flushing, so just ignore it; see bug 41305.
  }
}
Usage of org.apache.geode.cache.RegionDestroyedException in the Apache Geode project:
class QueueRemovalMessage, method process.
/**
 * Extracts each (regionName, size, eventIDs...) group from the message list and
 * removes the corresponding dispatched events from the matching HARegionQueue.
 *
 * <p>The message list is framed as: region name (String), count (Integer), then
 * exactly {@code count} {@link EventID}s. Event IDs must always be consumed from
 * the iterator — even when the queue is unavailable — to keep the framing aligned.
 */
@Override
protected void process(DistributionManager dm) {
  final InternalCache cache;
  // use GemFireCache.getInstance to avoid blocking during cache.xml processing.
  cache = GemFireCacheImpl.getInstance();
  if (cache != null) {
    Iterator iterator = this.messagesList.iterator();
    int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.BEFORE_INITIAL_IMAGE);
    try {
      while (iterator.hasNext()) {
        final String regionName = (String) iterator.next();
        final int size = (Integer) iterator.next();
        final LocalRegion region = (LocalRegion) cache.getRegion(regionName);
        final HARegionQueue hrq;
        if (region == null || !region.isInitialized()) {
          hrq = null;
        } else {
          HARegionQueue tmp = ((HARegion) region).getOwner();
          if (tmp != null && tmp.isQueueInitialized()) {
            hrq = tmp;
          } else {
            hrq = null;
          }
        }
        // a bunch of event IDs to go through
        for (int i = 0; i < size; i++) {
          // Always consume the event ID so the iterator stays aligned with the
          // (regionName, size, ids...) framing, even when the queue is skipped.
          final EventID id = (EventID) iterator.next();
          if (hrq == null || !hrq.isQueueInitialized()) {
            continue;
          }
          // FIX: read (and clear) the interrupt status only once we are actually
          // going to process this event. Checking it before the continue above
          // cleared a pending interrupt and then never restored it, silently
          // swallowing the interrupt for skipped events.
          boolean interrupted = Thread.interrupted();
          try {
            try {
              if (logger.isTraceEnabled()) {
                logger.trace("QueueRemovalMessage: removing dispatched events on queue {} for {}", regionName, id);
              }
              hrq.removeDispatchedEvents(id);
            } catch (RegionDestroyedException ignore) {
              logger.info(LocalizedMessage.create(LocalizedStrings.QueueRemovalMessage_QUEUE_FOUND_DESTROYED_WHILE_PROCESSING_THE_LAST_DISPTACHED_SEQUENCE_ID_FOR_A_HAREGIONQUEUES_DACE_THE_EVENT_ID_IS_0_FOR_HAREGION_WITH_NAME_1, new Object[] { id, regionName }));
            } catch (CancelException ignore) {
              // cache or DS is closing
              return;
            } catch (CacheException e) {
              // FIX: argument order was {regionName, id}, but placeholder {0} is
              // the event id and {1} is the region name (see the info log above).
              logger.error(LocalizedMessage.create(LocalizedStrings.QueueRemovalMessage_QUEUEREMOVALMESSAGEPROCESSEXCEPTION_IN_PROCESSING_THE_LAST_DISPTACHED_SEQUENCE_ID_FOR_A_HAREGIONQUEUES_DACE_THE_PROBLEM_IS_WITH_EVENT_ID__0_FOR_HAREGION_WITH_NAME_1, new Object[] { id, regionName }), e);
            } catch (InterruptedException ignore) {
              // Interrupt occurs during shutdown; this runs in an executor, so
              // stop processing. FIX: re-assert the interrupt status (via the
              // finally below) instead of swallowing it on return.
              interrupted = true;
              return;
            }
          } catch (RejectedExecutionException ignore) {
            interrupted = true;
          } finally {
            // Restore the interrupt status cleared by Thread.interrupted() or
            // by a caught InterruptedException/RejectedExecutionException.
            if (interrupted) {
              Thread.currentThread().interrupt();
            }
          }
        }
      }
    } finally {
      LocalRegion.setThreadInitLevelRequirement(oldLevel);
    }
  }
  // cache != null
}
Usage of org.apache.geode.cache.RegionDestroyedException in the Apache Geode project:
class HARegionQueue, method destroy.
/**
 * Destroys the underlying HARegion and removes this queue's reference from the
 * dispatched messages map. Best-effort: destruction and cancellation exceptions
 * during cleanup are tolerated so that teardown always completes and the stats
 * are always closed.
 */
public void destroy() throws CacheWriterException {
  this.destroyInProgress = true;
  Map dispatchedMessages = dispatchedMessagesMap;
  if (dispatchedMessages != null) {
    dispatchedMessages.remove(this.regionName);
  }
  try {
    try {
      updateHAContainer();
    } catch (RegionDestroyedException ignore) {
      // Region is already gone; nothing left to update.
    } catch (CancelException ignore) {
      // Cache or DS is shutting down; continue with local cleanup.
      if (logger.isDebugEnabled()) {
        logger.debug("HARegionQueue#destroy: ignored cancellation!!!!");
      }
    }
    try {
      this.region.destroyRegion();
    } catch (RegionDestroyedException | CancelException ignore) {
      // Already destroyed, or shutting down; keep going either way.
    }
    ((HAContainerWrapper) haContainer).removeProxy(regionName);
  } finally {
    // Always release the stats, regardless of how cleanup went.
    this.stats.close();
  }
}
Usage of org.apache.geode.cache.RegionDestroyedException in the Apache Geode project:
class BucketProfileUpdateMessage, method process.
/**
 * Applies the received bucket profile to the local region advisor. Races with
 * region/cache destruction are expected and logged at debug level only; a reply
 * is always sent when a processor is waiting.
 */
@Override
protected void process(DistributionManager dm) {
  try {
    PartitionedRegion pr = PartitionedRegion.getPRFromId(this.prId);
    // pr.waitOnBucketInitialization(); // While PR doesn't directly do GII, wait on this for
    // bucket initialization -- mthomas 5/17/2007
    pr.getRegionAdvisor().putBucketProfile(this.bucketId, this.profile);
  } catch (PRLocallyDestroyedException e) {
    if (logger.isDebugEnabled()) {
      logger.debug("<region locally destroyed> ///{}", this);
    }
  } catch (RegionDestroyedException e) {
    if (logger.isDebugEnabled()) {
      logger.debug("<region destroyed> ///{}", this);
    }
  } catch (CancelException e) {
    if (logger.isDebugEnabled()) {
      logger.debug("<cache closed> ///{}", this);
    }
  } catch (VirtualMachineError err) {
    SystemFailure.initiateFailure(err);
    // now, so don't let this thread continue.
    throw err;
  } catch (Throwable ignore) {
    // Whenever you catch Error or Throwable, you must also
    // catch VirtualMachineError (see above). However, there is
    // _still_ a possibility that you are dealing with a cascading
    // error condition, so you also need to check to see if the JVM
    // is still usable:
    SystemFailure.checkFailure();
  } finally {
    // Reply regardless of outcome so the sender's processor is not left hanging.
    if (this.processorId != 0) {
      ReplyMessage.send(getSender(), this.processorId, null, dm);
    }
  }
}
Usage of org.apache.geode.cache.RegionDestroyedException in the Apache Geode project:
class PartitionedRegion, method getDataRegionForRead.
/**
 * Resolves the bucket region that holds the given key for a transactional read.
 * Resolves the bucket id if unknown, optionally verifies this member is the
 * primary, and translates infrastructure failures into transaction exceptions.
 * Returns {@code null} when a ForceReattemptException occurs.
 */
@Override
public LocalRegion getDataRegionForRead(final KeyInfo keyInfo) {
  final Object key = keyInfo.getKey();
  BucketRegion bucket;
  try {
    final PartitionedRegionDataStore dataStore = getDataStore();
    if (dataStore == null) {
      // A member without a data store cannot host transactional data.
      throw new TransactionException(LocalizedStrings.PartitionedRegion_TX_ON_DATASTORE.toLocalizedString());
    }
    // TODO provide appropriate Operation and arg
    int bucketId = keyInfo.getBucketId();
    if (bucketId == KeyInfo.UNKNOWN_BUCKET) {
      // Derive the bucket from the key and cache it on the KeyInfo.
      bucketId = PartitionedRegionHelper.getHashKey(this, null, key, keyInfo.getValue(), keyInfo.getCallbackArg());
      keyInfo.setBucketId(bucketId);
    }
    bucket = dataStore.getInitializedBucketWithKnownPrimaryForId(null, bucketId);
    if (keyInfo.isCheckPrimary()) {
      try {
        bucket.checkForPrimary();
      } catch (PrimaryBucketException pbe) {
        throw new TransactionDataRebalancedException(LocalizedStrings.PartitionedRegion_TRANSACTIONAL_DATA_MOVED_DUE_TO_REBALANCING.toLocalizedString(), pbe);
      }
    }
  } catch (RegionDestroyedException ignore) {
    // TODO: why is this purposely not wrapping the original cause?
    throw new TransactionDataNotColocatedException(LocalizedStrings.PartitionedRegion_KEY_0_NOT_COLOCATED_WITH_TRANSACTION.toLocalizedString(key));
  } catch (ForceReattemptException ignore) {
    bucket = null;
  }
  return bucket;
}
Aggregations