Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.
The class HeapEvictor, method onEvent.
@Override
public void onEvent(final MemoryEvent event) {
if (DISABLE_HEAP_EVICTIOR_THREAD_POOL) {
return;
}
// Only respond to eviction events that originated in this VM.
if (isRunning() && event.isLocal()) {
if (event.getState().isEviction()) {
final LogWriter logWriter = cache.getLogger();
// Have we previously received an eviction event and already started eviction ...
if (this.mustEvict.get()) {
if (logWriter.fineEnabled()) {
logWriter.fine("Updating eviction in response to memory event: " + event + ". previousBytesUsed=" + previousBytesUsed);
}
// Synchronize with the eviction thread to update the number of fast loops to perform.
synchronized (evictionLock) {
numEvictionLoopsCompleted = 0;
numFastLoops = (int) ((event.getBytesUsed() - event.getThresholds().getEvictionThresholdClearBytes() + getTotalBytesToEvict()) / getTotalBytesToEvict());
evictionLock.notifyAll();
}
// There is already a thread running the evictions, so we're done.
return;
}
if (!this.mustEvict.compareAndSet(false, true)) {
// Another thread just started evicting.
return;
}
numEvictionLoopsCompleted = 0;
numFastLoops = (int) ((event.getBytesUsed() - event.getThresholds().getEvictionThresholdClearBytes() + getTotalBytesToEvict()) / getTotalBytesToEvict());
if (logWriter.fineEnabled()) {
logWriter.fine("Starting eviction in response to memory event: " + event);
}
// The new thread which will run in a loop performing evictions
final Runnable evictionManagerTask = new Runnable() {
@Override
public void run() {
// Has the test hook been set which will cause eviction to abort early
if (numEvictionLoopsCompleted < testAbortAfterLoopCount) {
try {
// Submit tasks into the queue to do the evictions
if (EVICT_HIGH_ENTRY_COUNT_BUCKETS_FIRST) {
createAndSubmitWeightedRegionEvictionTasks();
} else {
for (RegionEvictorTask task : createRegionEvictionTasks()) {
executeInThreadPool(task);
}
}
RegionEvictorTask.setLastTaskCompletionTime(System.currentTimeMillis());
// Make sure that another thread isn't processing a new eviction event
// and changing the number of fast loops to perform.
synchronized (evictionLock) {
int delayTime = getEvictionLoopDelayTime();
if (logWriter.fineEnabled()) {
logWriter.fine("Eviction loop delay time calculated to be " + delayTime + " milliseconds. Fast Loops=" + numFastLoops + ", Loop #=" + numEvictionLoopsCompleted + 1);
}
numEvictionLoopsCompleted++;
try {
// Wait and release the lock so that the number of fast loops
// needed can be updated by another thread processing a new
// eviction event.
evictionLock.wait(delayTime);
} catch (InterruptedException iex) {
// Loop and try again
}
}
// Do we think we're still above the eviction threshold ...
if (HeapEvictor.this.mustEvict.get()) {
// Submit this runnable back into the thread pool and execute
// another pass at eviction.
executeInThreadPool(this);
}
} catch (RegionDestroyedException e) {
// The region being evicted was destroyed concurrently; retry without
// logging an error message. fixes bug 48162
if (HeapEvictor.this.mustEvict.get()) {
executeInThreadPool(this);
}
}
}
}
};
// Submit the first pass at eviction into the pool
executeInThreadPool(evictionManagerTask);
} else {
this.mustEvict.set(false);
}
}
}
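The pattern worth noting here is the AtomicBoolean handoff: compareAndSet(false, true) guarantees exactly one eviction-manager task is live, while later memory events only refresh the loop budget under evictionLock and notify the waiting worker. Below is a minimal, self-contained sketch of that pattern; all names (EvictionDriver, onMemoryEvent, numFastLoops) are illustrative, not Geode API.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;

public class EvictionDriver {
  private final AtomicBoolean mustEvict = new AtomicBoolean(false);
  private final Object evictionLock = new Object();
  private final ExecutorService pool = Executors.newSingleThreadExecutor();
  private int numFastLoops; // guarded by evictionLock

  /** Called for every memory event. */
  public void onMemoryEvent(boolean aboveEvictionThreshold, int fastLoops) {
    if (!aboveEvictionThreshold) {
      mustEvict.set(false); // tells the running worker to stop resubmitting itself
      return;
    }
    if (mustEvict.get()) {
      // A worker is already running: just refresh its loop budget and wake it.
      synchronized (evictionLock) {
        numFastLoops = fastLoops;
        evictionLock.notifyAll();
      }
      return;
    }
    if (!mustEvict.compareAndSet(false, true)) {
      return; // another thread won the race and started the worker
    }
    synchronized (evictionLock) {
      numFastLoops = fastLoops;
    }
    pool.execute(this::evictionPass);
  }

  private void evictionPass() {
    // ... submit region eviction tasks here ...
    synchronized (evictionLock) {
      try {
        // Release the lock while waiting so onMemoryEvent can update numFastLoops.
        evictionLock.wait(100);
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
      }
    }
    if (mustEvict.get()) {
      pool.execute(this::evictionPass); // still above threshold: schedule another pass
    }
  }
}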
Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.
The class CacheClientUpdater, method handleDestroyRegion.
/**
* Locally destroy a region
*
* @param clientMessage message describing the region
*/
private void handleDestroyRegion(Message clientMessage) {
String regionName = null;
final boolean isDebugEnabled = logger.isDebugEnabled();
try {
// Retrieve the data from the local-destroy-region message parts
if (isDebugEnabled) {
logger.debug("Received destroy region message of length ({} bytes)", clientMessage.getPayloadLength());
}
int partCnt = 0;
Part regionNamePart = clientMessage.getPart(partCnt++);
Part callbackArgumentPart = clientMessage.getPart(partCnt++);
regionName = regionNamePart.getString();
Object callbackArgument = callbackArgumentPart.getObject();
Part hasCqsPart = clientMessage.getPart(partCnt++);
if (isDebugEnabled) {
logger.debug("Destroying region: {} callbackArgument: {}", regionName, callbackArgument);
}
// Handle CQs if any on this region.
if ((Boolean) hasCqsPart.getObject()) {
Part numCqsPart = clientMessage.getPart(partCnt++);
if (isDebugEnabled) {
logger.debug("Received message has CQ Event. Number of cqs interested in the event : {}", numCqsPart.getInt() / 2);
}
// TODO: partCnt is unused -- does processCqs have side effects?
partCnt = processCqs(clientMessage, partCnt, numCqsPart.getInt(), clientMessage.getMessageType(), null, null);
}
// Confirm that the region exists
LocalRegion region = (LocalRegion) this.cacheHelper.getRegion(regionName);
if (region == null) {
if (isDebugEnabled && !quitting()) {
logger.debug("Region named {} does not exist", regionName);
}
return;
}
// Verify that the region in question should respond to this message
if (region.hasServerProxy()) {
// Locally destroy the region
region.localDestroyRegion(callbackArgument);
if (isDebugEnabled) {
logger.debug("Destroyed region: {} callbackArgument: {}", regionName, callbackArgument);
}
}
} catch (RegionDestroyedException ignore) {
// already destroyed
if (isDebugEnabled) {
logger.debug("region already destroyed: {}", regionName);
}
} catch (Exception e) {
String message = LocalizedStrings.CacheClientUpdater_CAUGHT_AN_EXCEPTION_WHILE_ATTEMPTING_TO_DESTROY_REGION_0.toLocalizedString(regionName);
handleException(message, e);
}
}
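The defensive shape of this handler generalizes: look the region up, treat a null result as already-gone, and treat RegionDestroyedException as a benign race rather than an error. Here is a hedged sketch using only the public Geode client API; the helper class and method names are made up for illustration.
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionDestroyedException;
import org.apache.geode.cache.client.ClientCache;

public final class SafeRegionTeardown {
  private SafeRegionTeardown() {}

  /**
   * Locally destroys the named region, tolerating the races the handler
   * above guards against: the region may be missing, or may be destroyed
   * concurrently by another thread.
   */
  public static void localDestroyQuietly(ClientCache cache, String regionName) {
    Region<?, ?> region = cache.getRegion(regionName);
    if (region == null) {
      return; // never existed or already gone
    }
    try {
      region.localDestroyRegion();
    } catch (RegionDestroyedException ignore) {
      // destroyed between the lookup and the call: nothing left to do
    }
  }
}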
Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.
The class CacheClientProxy, method destroyRQ.
private void destroyRQ() {
if (this._messageDispatcher == null) {
return;
}
try {
// Using destroy region because this method is modified in HARegion so as
// not to distribute.
// For normal regions, even localDestroyRegion actually propagates.
HARegionQueue rq = this._messageDispatcher._messageQueue;
rq.destroy();
// if (!rq.getRegion().isDestroyed()) {
// rq.getRegion().destroyRegion();
// }
} catch (RegionDestroyedException rde) {
// deliberately ignored (not rethrown): the queue region is already destroyed
} catch (CancelException e) {
// deliberately ignored (not rethrown): the cache is closing
} catch (Exception warning) {
logger.warn(LocalizedMessage.create(LocalizedStrings.CacheClientProxy_0_EXCEPTION_IN_CLOSING_THE_UNDERLYING_HAREGION_OF_THE_HAREGIONQUEUE, this), warning);
}
}
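destroyRQ is an instance of a common teardown idiom: during shutdown, RegionDestroyedException means the resource is already gone and CancelException means the cache is closing, so both are deliberately swallowed while any other exception is logged as a warning. A generic sketch of the idiom follows, assuming log4j2 (Geode's logging backend); the Teardown class itself is illustrative.
import org.apache.geode.CancelException;
import org.apache.geode.cache.RegionDestroyedException;
import org.apache.logging.log4j.Logger;

final class Teardown {
  private Teardown() {}

  /** Runs one cleanup step, treating "already gone" and "shutting down" as success. */
  static void cleanupQuietly(Runnable step, Logger logger, String what) {
    try {
      step.run();
    } catch (RegionDestroyedException ignore) {
      // the resource was already destroyed: the cleanup goal is met
    } catch (CancelException ignore) {
      // the cache is closing: a broader teardown is already in progress
    } catch (Exception e) {
      logger.warn("Exception while cleaning up {}", what, e);
    }
  }
}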
Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.
The class Oplog, method recoverDrf.
/**
* Return bytes read.
*/
long recoverDrf(OplogEntryIdSet deletedIds, boolean alreadyRecoveredOnce, boolean latestOplog) {
File drfFile = this.drf.f;
if (drfFile == null) {
this.haveRecoveredDrf = true;
return 0L;
}
lockCompactor();
try {
if (this.haveRecoveredDrf && !getHasDeletes()) {
// do this check while holding the compactor lock
return 0L;
}
if (!this.haveRecoveredDrf) {
this.haveRecoveredDrf = true;
}
logger.info(LocalizedMessage.create(LocalizedStrings.DiskRegion_RECOVERING_OPLOG_0_1_2, new Object[] { toString(), drfFile.getAbsolutePath(), getParent().getName() }));
this.recoverDelEntryId = DiskStoreImpl.INVALID_ID;
boolean readLastRecord = true;
CountingDataInputStream dis = null;
try {
int recordCount = 0;
boolean foundDiskStoreRecord = false;
FileInputStream fis = null;
try {
fis = new FileInputStream(drfFile);
dis = new CountingDataInputStream(new BufferedInputStream(fis, 32 * 1024), drfFile.length());
boolean endOfLog = false;
while (!endOfLog) {
if (dis.atEndOfFile()) {
endOfLog = true;
break;
}
readLastRecord = false;
byte opCode = dis.readByte();
if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
logger.trace(LogMarker.PERSIST_RECOVERY, "drf byte={} location={}", opCode, Long.toHexString(dis.getCount()));
}
switch(opCode) {
case OPLOG_EOF_ID:
// we are at the end of the oplog. So we need to back up one byte
dis.decrementCount();
endOfLog = true;
break;
case OPLOG_DEL_ENTRY_1ID:
case OPLOG_DEL_ENTRY_2ID:
case OPLOG_DEL_ENTRY_3ID:
case OPLOG_DEL_ENTRY_4ID:
case OPLOG_DEL_ENTRY_5ID:
case OPLOG_DEL_ENTRY_6ID:
case OPLOG_DEL_ENTRY_7ID:
case OPLOG_DEL_ENTRY_8ID:
readDelEntry(dis, opCode, deletedIds, parent);
recordCount++;
break;
case OPLOG_DISK_STORE_ID:
readDiskStoreRecord(dis, this.drf.f);
foundDiskStoreRecord = true;
recordCount++;
break;
case OPLOG_MAGIC_SEQ_ID:
readOplogMagicSeqRecord(dis, this.drf.f, OPLOG_TYPE.DRF);
break;
case OPLOG_GEMFIRE_VERSION:
readGemfireVersionRecord(dis, this.drf.f);
recordCount++;
break;
case OPLOG_RVV:
long idx = dis.getCount(); // file offset of the RVV record (currently unused)
readRVVRecord(dis, this.drf.f, true, latestOplog);
recordCount++;
break;
default:
throw new DiskAccessException(LocalizedStrings.Oplog_UNKNOWN_OPCODE_0_FOUND_IN_DISK_OPERATION_LOG.toLocalizedString(opCode), getParent());
}
readLastRecord = true;
// @todo
// if (rgn.isDestroyed()) {
// break;
// }
}
// while
} finally {
if (dis != null) {
dis.close();
}
if (fis != null) {
fis.close();
}
}
if (!foundDiskStoreRecord && recordCount > 0) {
throw new DiskAccessException("The oplog file \"" + this.drf.f + "\" does not belong to the init file \"" + getParent().getInitFile() + "\". Drf did not contain a disk store id.", getParent());
}
} catch (EOFException ignore) {
// ignore since a partial record write can be caused by a crash
} catch (IOException ex) {
getParent().getCancelCriterion().checkCancelInProgress(ex);
throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0.toLocalizedString(drfFile.getPath()), ex, getParent());
} catch (CancelException e) {
if (logger.isDebugEnabled()) {
logger.debug("Oplog::readOplog:Error in recovery as Cache was closed", e);
}
} catch (RegionDestroyedException e) {
if (logger.isDebugEnabled()) {
logger.debug("Oplog::readOplog:Error in recovery as Region was destroyed", e);
}
} catch (IllegalStateException e) {
// deliberately rethrown, unlike the cancel/destroy cases above
throw e;
}
// Add the Oplog size to the Directory Holder which owns this oplog,
// so that available space is correctly calculated & stats updated.
long byteCount = 0;
if (!readLastRecord) {
// this means that there was a crash
// and hence we should not continue to read
// the next oplog
this.crashed = true;
if (dis != null) {
byteCount = dis.getFileLength();
}
} else {
if (dis != null) {
byteCount = dis.getCount();
}
}
if (!alreadyRecoveredOnce) {
setRecoveredDrfSize(byteCount);
this.dirHolder.incrementTotalOplogSize(byteCount);
}
return byteCount;
} finally {
unlockCompactor();
}
}
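The recovery loop above has the standard shape for reading a record-oriented log: read an opcode byte, dispatch on it, count complete records, and treat an EOFException on a torn tail as a crash artifact rather than corruption. Here is a stripped-down sketch of that shape; the opcodes and record layout are illustrative, not Geode's oplog format.
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileInputStream;
import java.io.IOException;

public final class LogRecovery {
  // Illustrative opcodes, not Geode's.
  static final byte OP_EOF = 0;
  static final byte OP_DELETE = 1;

  /** Returns the number of complete records recovered before EOF or a torn tail. */
  public static int recover(String path) throws IOException {
    int recordCount = 0;
    try (DataInputStream in =
        new DataInputStream(new BufferedInputStream(new FileInputStream(path), 32 * 1024))) {
      while (true) {
        byte opCode = in.readByte(); // throws EOFException at a clean end of file too
        switch (opCode) {
          case OP_EOF:
            return recordCount; // explicit end-of-log marker
          case OP_DELETE:
            in.readLong(); // consume the record body for this opcode
            recordCount++;
            break;
          default:
            throw new IOException("unknown opcode " + opCode + " in " + path);
        }
      }
    } catch (EOFException ignore) {
      // a partial trailing record is expected after a crash: keep what we have
      return recordCount;
    }
  }
}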
Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.
The class LocalRegion, method recursiveDestroyRegion.
/**
* Removes entries and recursively destroys subregions.
*
* @param eventSet collects the events for all destroyed regions; if null, then we're closing, so
* don't send events to callbacks or destroy the disk region
*/
private void recursiveDestroyRegion(Set eventSet, RegionEventImpl regionEvent, boolean cacheWrite) throws CacheWriterException, TimeoutException {
final boolean isClose = regionEvent.getOperation().isClose();
// do the cacheWriter beforeRegionDestroy first to fix bug 47736
if (eventSet != null && cacheWrite) {
try {
cacheWriteBeforeRegionDestroy(regionEvent);
} catch (CancelException e) {
// I don't think this should ever happen: bulletproofing for bug 39454
if (!this.cache.forcedDisconnect()) {
logger.warn(LocalizedMessage.create(LocalizedStrings.LocalRegion_RECURSIVEDESTROYREGION_PROBLEM_IN_CACHEWRITEBEFOREREGIONDESTROY), e);
}
}
}
if (this.eventTracker != null) {
this.eventTracker.stop();
}
if (logger.isTraceEnabled(LogMarker.RVV) && getVersionVector() != null) {
logger.trace(LogMarker.RVV, "version vector for {} is {}", getName(), getVersionVector().fullToString());
}
cancelTTLExpiryTask();
cancelIdleExpiryTask();
cancelAllEntryExpiryTasks();
if (!isInternalRegion()) {
getCachePerfStats().incRegions(-1);
}
this.cache.getInternalResourceManager(false).removeResourceListener(this);
if (getMembershipAttributes().hasRequiredRoles()) {
if (!isInternalRegion()) {
getCachePerfStats().incReliableRegions(-1);
}
}
// The add only needs to be done if hasListener || hasAdminListener.
if (eventSet != null) {
eventSet.add(regionEvent);
}
try {
// call recursiveDestroyRegion on each subregion and remove it
// from this subregion map
Collection values = this.subregions.values();
for (Iterator itr = values.iterator(); itr.hasNext(); ) {
// element is a LocalRegion
Object element = itr.next();
LocalRegion region;
try {
LocalRegion.setThreadInitLevelRequirement(LocalRegion.BEFORE_INITIAL_IMAGE);
try {
// converts to a LocalRegion
region = toRegion(element);
} finally {
LocalRegion.setThreadInitLevelRequirement(LocalRegion.AFTER_INITIAL_IMAGE);
}
} catch (CancelException ignore) {
// ignore, keep going through the motions though
region = (LocalRegion) element;
} catch (RegionDestroyedException ignore) {
// SharedRegionData was destroyed
continue;
}
// skip a subregion whose failed initialization already removed it from the parent subregion map
if (region.isDestroyed) {
continue;
}
// BEGIN operating on subregion of this region (rgn)
if (eventSet != null) {
regionEvent = (RegionEventImpl) regionEvent.clone();
regionEvent.region = region;
}
try {
region.recursiveDestroyRegion(eventSet, regionEvent, cacheWrite);
if (!region.isInternalRegion()) {
InternalDistributedSystem system = region.cache.getInternalDistributedSystem();
system.handleResourceEvent(ResourceEvent.REGION_REMOVE, region);
}
} catch (CancelException e) {
// I don't think this should ever happen: bulletproofing for bug 39454
if (!this.cache.forcedDisconnect()) {
logger.warn(LocalizedMessage.create(LocalizedStrings.LocalRegion_RECURSIVEDESTROYREGION_RECURSION_FAILED_DUE_TO_CACHE_CLOSURE_REGION_0, region.getFullPath()), e);
}
}
// remove from this subregion map;
itr.remove();
// END operating on subregion of this region
}
try {
if (this.indexManager != null) {
try {
if (this instanceof BucketRegion) {
this.indexManager.removeBucketIndexes(getPartitionedRegion());
}
this.indexManager.destroy();
} catch (QueryException e) {
throw new IndexMaintenanceException(e);
}
}
} catch (CancelException e) {
// I don't think this should ever happen: bulletproofing for bug 39454
if (!this.cache.forcedDisconnect()) {
logger.warn(LocalizedMessage.create(LocalizedStrings.LocalRegion_BASICDESTROYREGION_INDEX_REMOVAL_FAILED_DUE_TO_CACHE_CLOSURE_REGION_0, getFullPath()), e);
}
}
} finally {
// mark this region as destroyed.
if (regionEvent.isReinitializing()) {
this.reinitialized_old = true;
}
this.cache.setRegionByPath(getFullPath(), null);
if (this.eventTracker != null) {
this.eventTracker.stop();
}
if (this.diskRegion != null) {
this.diskRegion.prepareForClose(this);
}
this.isDestroyed = true;
// after isDestroyed is set to true call removeResourceListener to fix bug 49555
this.cache.getInternalResourceManager(false).removeResourceListener(this);
closeEntries();
if (logger.isDebugEnabled()) {
logger.debug("recursiveDestroyRegion: Region Destroyed: {}", getFullPath());
}
// otherwise, the listener will be closed after the destroy event
try {
postDestroyRegion(!isClose, regionEvent);
} catch (CancelException e) {
logger.warn(LocalizedMessage.create(LocalizedStrings.LocalRegion_RECURSIVEDESTROYREGION_POSTDESTROYREGION_FAILED_DUE_TO_CACHE_CLOSURE_REGION_0, getFullPath()), e);
}
// fix for bug #47061
if (getServerProxy() == null) {
closeCqs();
}
detachPool();
if (eventSet != null) {
closeCallbacksExceptListener();
} else {
closeAllCallbacks();
}
if (this.concurrencyChecksEnabled && this.dataPolicy.withReplication() && !this.cache.isClosed()) {
this.cache.getTombstoneService().unscheduleTombstones(this);
}
if (this.hasOwnStats) {
this.cachePerfStats.close();
}
}
}
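Two disciplines in recursiveDestroyRegion are worth copying: each subregion is destroyed inside its own try/catch so a single failure cannot abort the traversal, and the parent's own cleanup sits in a finally block so the destroyed flag is set no matter what. A compact sketch of that structure follows, over an illustrative tree type rather than Geode's region classes.
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class Node {
  final String name;
  final List<Node> children = new ArrayList<>();
  volatile boolean destroyed;

  Node(String name) { this.name = name; }

  void recursiveDestroy() {
    try {
      for (Iterator<Node> it = children.iterator(); it.hasNext(); ) {
        Node child = it.next();
        if (child.destroyed) {
          continue; // already torn down concurrently: skip it, keep going
        }
        try {
          child.recursiveDestroy();
        } catch (RuntimeException e) {
          // isolate per-child failures so the rest of the tree still gets destroyed
          System.err.println("destroy failed for " + child.name + ": " + e);
        }
        it.remove(); // remove the child from this node's list either way
      }
    } finally {
      // the parent is marked destroyed even if a child failed above
      destroyed = true;
    }
  }
}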