Use of org.apache.geode.CancelException in project geode by apache.
The class DiskInitFile, method recover().
DiskStoreID recover() {
  recoverFromFailedCompaction();
  if (!this.ifFile.exists()) {
    // no init file to recover from; generate a fresh disk store id
    return new DiskStoreID(UUID.randomUUID());
  }
  DiskStoreID result = null;
  try {
    FileInputStream fis = null;
    CountingDataInputStream dis = null;
    try {
      fis = new FileInputStream(this.ifFile);
      dis = new CountingDataInputStream(new BufferedInputStream(fis, 8 * 1024),
          this.ifFile.length());
      DiskInitFileParser parser = new DiskInitFileParser(dis, this);
      result = parser.parse();
      this.gotEOF = parser.gotEOF();
      this.nextSeekPosition = dis.getCount();
      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
        logger.trace(LogMarker.PERSIST_RECOVERY, "liveRecordCount={} totalRecordCount={}",
            this.ifLiveRecordCount, this.ifTotalRecordCount);
      }
    } finally {
      if (dis != null) {
        dis.close();
      }
      if (fis != null) {
        fis.close();
      }
    }
    for (PlaceHolderDiskRegion drv : this.drMap.values()) {
      if (drv.getMyPersistentID() != null || drv.getMyInitializingID() != null) {
        // prepare each region found in the init file for early recovery
        if (drv.isBucket() || !getDiskStore().getOwnedByRegion()) {
          if (drv.isBucket() && !drv.getActualLruAlgorithm().isNone()) {
            drv.prlruStats = getDiskStore().getOrCreatePRLRUStats(drv);
          }
          getDiskStore().getStats().incUncreatedRecoveredRegions(1);
          drv.setRecoveredEntryMap(RegionMapFactory.createVM(drv, getDiskStore(),
              getDiskStore().getInternalRegionArguments()));
          if (!getDiskStore().isOffline()) {
            // schedule it for recovery since we want to recover region data early now
            getDiskStore().scheduleForRecovery(drv);
          }
          // else if we are validating or offline-compacting,
          // scheduleForRecovery is called later in DiskStoreImpl;
          // this helps fix bug 42043
        }
      }
    }
  } catch (EOFException ex) {
    // ignore since a partial record write can be caused by a crash
  } catch (ClassNotFoundException ex) {
    throw new DiskAccessException(
        LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0
            .toLocalizedString(this.ifFile.getPath()),
        ex, this.parent);
  } catch (IOException ex) {
    throw new DiskAccessException(
        LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0
            .toLocalizedString(this.ifFile.getPath()),
        ex, this.parent);
  } catch (CancelException ignore) {
    if (logger.isDebugEnabled()) {
      logger.debug("Oplog::readOplog:Error in recovery as Cache was closed", ignore);
    }
  } catch (RegionDestroyedException ignore) {
    if (logger.isDebugEnabled()) {
      logger.debug("Oplog::readOplog:Error in recovery as Region was destroyed", ignore);
    }
  } catch (IllegalStateException ex) {
    if (!this.parent.isClosing()) {
      throw ex;
    }
  }
  return result;
}
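The nested finally block above hand-closes both streams because the code predates try-with-resources. As a rough sketch of the same discipline in modern Java, with plain JDK streams (the file format and helper names here are illustrative, not Geode's):

import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;

public class InitFileReadSketch {
  // Reads a single long header from an init-style file. EOFException is
  // swallowed, mirroring recover() above: a crash can leave a partial
  // record, and a truncated tail is treated as "nothing more to recover".
  static long readHeader(File ifFile) throws IOException {
    try (FileInputStream fis = new FileInputStream(ifFile);
        DataInputStream dis =
            new DataInputStream(new BufferedInputStream(fis, 8 * 1024))) {
      return dis.readLong();
    } catch (EOFException ignore) {
      return -1L; // partial write from a crash; nothing usable
    }
  }
}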
Use of org.apache.geode.CancelException in project geode by apache.
The class QueueRemovalMessage, method process().
/**
* Extracts the region from the message list and hands over the message removal task to the
* executor
*/
@Override
protected void process(DistributionManager dm) {
  final InternalCache cache;
  // use GemFireCacheImpl.getInstance to avoid blocking during cache.xml processing
  cache = GemFireCacheImpl.getInstance();
  if (cache != null) {
    Iterator iterator = this.messagesList.iterator();
    int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.BEFORE_INITIAL_IMAGE);
    try {
      while (iterator.hasNext()) {
        final String regionName = (String) iterator.next();
        final int size = (Integer) iterator.next();
        final LocalRegion region = (LocalRegion) cache.getRegion(regionName);
        final HARegionQueue hrq;
        if (region == null || !region.isInitialized()) {
          hrq = null;
        } else {
          HARegionQueue tmp = ((HARegion) region).getOwner();
          if (tmp != null && tmp.isQueueInitialized()) {
            hrq = tmp;
          } else {
            hrq = null;
          }
        }
        // even if hrq is unavailable, we must step through the batch of event IDs
        for (int i = 0; i < size; i++) {
          final EventID id = (EventID) iterator.next();
          boolean interrupted = Thread.interrupted();
          if (hrq == null || !hrq.isQueueInitialized()) {
            continue;
          }
          try {
            try {
              if (logger.isTraceEnabled()) {
                logger.trace("QueueRemovalMessage: removing dispatched events on queue {} for {}",
                    regionName, id);
              }
              hrq.removeDispatchedEvents(id);
            } catch (RegionDestroyedException ignore) {
              logger.info(LocalizedMessage.create(
                  LocalizedStrings.QueueRemovalMessage_QUEUE_FOUND_DESTROYED_WHILE_PROCESSING_THE_LAST_DISPTACHED_SEQUENCE_ID_FOR_A_HAREGIONQUEUES_DACE_THE_EVENT_ID_IS_0_FOR_HAREGION_WITH_NAME_1,
                  new Object[] { id, regionName }));
            } catch (CancelException ignore) {
              // cache or DS is closing
              return;
            } catch (CacheException e) {
              logger.error(LocalizedMessage.create(
                  LocalizedStrings.QueueRemovalMessage_QUEUEREMOVALMESSAGEPROCESSEXCEPTION_IN_PROCESSING_THE_LAST_DISPTACHED_SEQUENCE_ID_FOR_A_HAREGIONQUEUES_DACE_THE_PROBLEM_IS_WITH_EVENT_ID__0_FOR_HAREGION_WITH_NAME_1,
                  new Object[] { regionName, id }), e);
            } catch (InterruptedException ignore) {
              // interrupt occurs during shutdown; this runs in an executor, so just stop processing
              return;
            }
          } catch (RejectedExecutionException ignore) {
            interrupted = true;
          } finally {
            if (interrupted) {
              Thread.currentThread().interrupt();
            }
          }
        } // for
      } // while
    } finally {
      LocalRegion.setThreadInitLevelRequirement(oldLevel);
    }
  } // cache != null
}
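The interrupted-flag bookkeeping in the loop above is the standard idiom for code that must keep draining a batch without losing a shutdown signal: clear the flag before the blocking work, restore it in finally. A minimal self-contained sketch of the idiom (the Task type and drain helper are hypothetical, not Geode API):

public class InterruptPreservingLoop {
  interface Task {
    void run() throws InterruptedException;
  }

  // Clears the thread's interrupt flag before each task so blocking calls
  // inside it are not immediately poisoned, then restores the flag in
  // finally so the executor still sees the shutdown request.
  static void drain(Iterable<Task> batch) {
    for (Task task : batch) {
      boolean interrupted = Thread.interrupted();
      try {
        task.run();
      } catch (InterruptedException ignore) {
        return; // shutdown in progress; stop processing
      } finally {
        if (interrupted) {
          Thread.currentThread().interrupt();
        }
      }
    }
  }
}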
Use of org.apache.geode.CancelException in project geode by apache.
The class HARegionQueue, method destroy().
/**
* destroys the underlying HARegion and removes its reference from the dispatched messages map
*/
public void destroy() throws CacheWriterException {
  this.destroyInProgress = true;
  Map tempDispatchedMessagesMap = dispatchedMessagesMap;
  if (tempDispatchedMessagesMap != null) {
    tempDispatchedMessagesMap.remove(this.regionName);
  }
  try {
    try {
      updateHAContainer();
    } catch (RegionDestroyedException ignore) {
      // keep going
    } catch (CancelException ignore) {
      // keep going
      if (logger.isDebugEnabled()) {
        logger.debug("HARegionQueue#destroy: ignored cancellation!!!!");
      }
    }
    try {
      this.region.destroyRegion();
    } catch (RegionDestroyedException ignore) {
      // keep going
    } catch (CancelException ignore) {
      // keep going
    }
    ((HAContainerWrapper) haContainer).removeProxy(regionName);
  } finally {
    this.stats.close();
  }
}
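destroy() shows the best-effort teardown pattern that recurs around CancelException: each step independently swallows "already destroyed" and "shutting down" conditions so the remaining steps and the finally block still run. A sketch of that pattern with a hypothetical helper (Runnable steps stand in for the real updateHAContainer and destroyRegion calls; both exceptions are unchecked, which is why this compiles):

import org.apache.geode.CancelException;
import org.apache.geode.cache.RegionDestroyedException;

public class BestEffortTeardown {
  // Runs one teardown step, treating a vanished region or a closing
  // cache as non-fatal so subsequent steps still execute.
  static void step(Runnable action) {
    try {
      action.run();
    } catch (RegionDestroyedException ignore) {
      // already gone; keep going
    } catch (CancelException ignore) {
      // cache or distributed system is closing; keep going
    }
  }
}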
Use of org.apache.geode.CancelException in project geode by apache.
The class BucketProfileUpdateMessage, method process().
@Override
protected void process(DistributionManager dm) {
  try {
    PartitionedRegion pr = PartitionedRegion.getPRFromId(this.prId);
    // pr.waitOnBucketInitialization(); // While PR doesn't directly do GII, wait on this for
    // bucket initialization -- mthomas 5/17/2007
    pr.getRegionAdvisor().putBucketProfile(this.bucketId, this.profile);
  } catch (PRLocallyDestroyedException fre) {
    if (logger.isDebugEnabled())
      logger.debug("<region locally destroyed> ///{}", this);
  } catch (RegionDestroyedException e) {
    if (logger.isDebugEnabled())
      logger.debug("<region destroyed> ///{}", this);
  } catch (CancelException e) {
    if (logger.isDebugEnabled())
      logger.debug("<cache closed> ///{}", this);
  } catch (VirtualMachineError err) {
    SystemFailure.initiateFailure(err);
    // If this ever returns, rethrow the error. We're poisoned
    // now, so don't let this thread continue.
    throw err;
  } catch (Throwable ignore) {
    // Whenever you catch Error or Throwable, you must also
    // catch VirtualMachineError (see above). However, there is
    // _still_ a possibility that you are dealing with a cascading
    // error condition, so you also need to check to see if the JVM
    // is still usable:
    SystemFailure.checkFailure();
  } finally {
    if (this.processorId != 0) {
      ReplyMessage.send(getSender(), this.processorId, null, dm);
    }
  }
}
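The last two catch blocks follow Geode's standard error-handling convention for message processing: a VirtualMachineError is reported to SystemFailure and rethrown immediately, and any broader Throwable catch must call SystemFailure.checkFailure() before doing anything else, in case the JVM is already failing. A stripped-down sketch of that convention (the Runnable handleMessage is a hypothetical stand-in for the real message body):

import org.apache.geode.SystemFailure;

public class MessageProcessingSketch {
  static void process(Runnable handleMessage) {
    try {
      handleMessage.run();
    } catch (VirtualMachineError err) {
      SystemFailure.initiateFailure(err);
      // if initiateFailure ever returns, rethrow; this thread is poisoned
      throw err;
    } catch (Throwable t) {
      // a broad catch must re-check JVM health before swallowing anything;
      // checkFailure() rethrows if a fatal error has been recorded
      SystemFailure.checkFailure();
      // otherwise the failure is non-fatal here and is intentionally dropped
    }
  }
}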
Use of org.apache.geode.CancelException in project geode by apache.
The class GMSMembershipManager, method saveCacheXmlForReconnect().
/** generate XML from the cache before shutting down due to forced disconnect */
public void saveCacheXmlForReconnect(boolean sharedConfigEnabled) {
  // first save the current cache description so reconnect can rebuild the cache
  InternalCache cache = GemFireCacheImpl.getInstance();
  if (cache != null) {
    if (!Boolean.getBoolean(DistributionConfig.GEMFIRE_PREFIX + "autoReconnect-useCacheXMLFile")
        && !sharedConfigEnabled) {
      try {
        logger.info("generating XML to rebuild the cache after reconnect completes");
        StringPrintWriter pw = new StringPrintWriter();
        CacheXmlGenerator.generate((Cache) cache, pw, true, false);
        String cacheXML = pw.toString();
        cache.getCacheConfig().setCacheXMLDescription(cacheXML);
        logger.info("XML generation completed: {}", cacheXML);
      } catch (CancelException e) {
        logger.info(LocalizedMessage.create(
            LocalizedStrings.GroupMembershipService_PROBLEM_GENERATING_CACHE_XML), e);
      }
    } else if (sharedConfigEnabled && !cache.getCacheServers().isEmpty()) {
      // we need to retain a cache-server description if this JVM was started by gfsh
      List<CacheServerCreation> list = new ArrayList<>(cache.getCacheServers().size());
      for (final Object o : cache.getCacheServers()) {
        CacheServerImpl cs = (CacheServerImpl) o;
        if (cs.isDefaultServer()) {
          CacheServerCreation bsc = new CacheServerCreation(cache, cs);
          list.add(bsc);
        }
      }
      cache.getCacheConfig().setCacheServerCreation(list);
      logger.info("CacheServer configuration saved");
    }
  }
}
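StringPrintWriter is an internal Geode PrintWriter that accumulates its output into a string; plain JDK classes fill the same role. A sketch of capturing generator-style output as a String under that assumption (writeCacheXml is a hypothetical stand-in for CacheXmlGenerator.generate):

import java.io.PrintWriter;
import java.io.StringWriter;

public class XmlCaptureSketch {
  static String capture() {
    StringWriter sw = new StringWriter();
    PrintWriter pw = new PrintWriter(sw);
    writeCacheXml(pw); // stand-in for CacheXmlGenerator.generate(cache, pw, true, false)
    pw.flush();
    return sw.toString(); // the description saved for the reconnect attempt
  }

  // hypothetical generator: emits a trivial cache description
  static void writeCacheXml(PrintWriter pw) {
    pw.println("<cache/>");
  }
}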