use of org.apache.geode.CancelException in project geode by apache.
the class LocalRegion method basicPutAll.
// TODO: refactor basicPutAll
public VersionedObjectList basicPutAll(final Map<?, ?> map, final DistributedPutAllOperation putAllOp, final Map<Object, VersionTag> retryVersions) {
final boolean isDebugEnabled = logger.isDebugEnabled();
final EntryEventImpl event = putAllOp.getBaseEvent();
EventID eventId = event.getEventId();
if (eventId == null && generateEventID()) {
// We need to "reserve" the eventIds for the entries in map here
event.reserveNewEventId(this.cache.getDistributedSystem(), map.size());
eventId = event.getEventId();
}
verifyPutAllMap(map);
VersionedObjectList proxyResult = null;
boolean partialResult = false;
RuntimeException runtimeException = null;
if (hasServerProxy()) {
// send message to bridge server
if (isTX()) {
TXStateProxyImpl txState = (TXStateProxyImpl) this.cache.getTxManager().getTXState();
txState.getRealDeal(null, this);
}
try {
proxyResult = getServerProxy().putAll(map, eventId, !event.isGenerateCallbacks(), event.getCallbackArgument());
if (isDebugEnabled) {
logger.debug("PutAll received response from server: {}", proxyResult);
}
} catch (PutAllPartialResultException e) {
// adjust the map to only add succeeded entries, then apply the adjustedMap
proxyResult = e.getSucceededKeysAndVersions();
partialResult = true;
if (isDebugEnabled) {
logger.debug("putAll in client encountered a PutAllPartialResultException:{}{}. Adjusted keys are: {}", e.getMessage(), getLineSeparator(), proxyResult.getKeys());
}
Throwable txException = e.getFailure();
while (txException != null) {
if (txException instanceof TransactionException) {
runtimeException = (RuntimeException) txException;
break;
}
txException = txException.getCause();
}
if (runtimeException == null) {
// for cache close
runtimeException = getCancelCriterion().generateCancelledException(e.getFailure());
if (runtimeException == null) {
runtimeException = new ServerOperationException(LocalizedStrings.Region_PutAll_Applied_PartialKeys_At_Server_0.toLocalizedString(getFullPath()), e.getFailure());
}
}
}
}
final VersionedObjectList succeeded = new VersionedObjectList(map.size(), true, this.concurrencyChecksEnabled);
// if this is a transactional putAll, we will not have version information, as it is
// only generated at commit, so treat a transactional putAll as if the server is not versioned
final boolean serverIsVersioned = proxyResult != null && proxyResult.regionIsVersioned() && !isTX() && this.dataPolicy != DataPolicy.EMPTY;
if (!serverIsVersioned && !partialResult) {
// we don't need server information if it isn't versioned or if the region is empty
proxyResult = null;
}
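// lock the region version vector so a concurrent clear cannot interleave with this bulk operation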
lockRVVForBulkOp();
try {
try {
int size = proxyResult == null ? map.size() : proxyResult.size();
if (isDebugEnabled) {
logger.debug("size of put result is {} maps is {} proxyResult is {}", size, map, proxyResult);
}
final PutAllPartialResult partialKeys = new PutAllPartialResult(size);
final Iterator iterator;
final boolean isVersionedResults;
if (proxyResult != null) {
iterator = proxyResult.iterator();
isVersionedResults = true;
} else {
iterator = map.entrySet().iterator();
isVersionedResults = false;
}
// TODO: refactor this mess
Runnable task = new Runnable() {
@Override
public void run() {
int offset = 0;
VersionTagHolder tagHolder = new VersionTagHolder();
while (iterator.hasNext()) {
stopper.checkCancelInProgress(null);
Map.Entry mapEntry = (Map.Entry) iterator.next();
Object key = mapEntry.getKey();
VersionTag versionTag = null;
tagHolder.setVersionTag(null);
final Object value;
boolean overwritten = false;
if (isVersionedResults) {
versionTag = ((VersionedObjectList.Entry) mapEntry).getVersionTag();
value = map.get(key);
if (isDebugEnabled) {
logger.debug("putAll key {} -> {} version={}", key, value, versionTag);
}
if (versionTag == null && serverIsVersioned && concurrencyChecksEnabled && dataPolicy.withStorage()) {
// the server did not return version information for this entry, so destroy the local
// entry since we don't know what its state should be (but the server should)
if (isDebugEnabled) {
logger.debug("server returned no version information for {}", key);
}
localDestroyNoCallbacks(key);
// to be consistent we need to fetch the current entry
get(key, event.getCallbackArgument(), false, null);
overwritten = true;
}
} else {
value = mapEntry.getValue();
if (isDebugEnabled) {
logger.debug("putAll {} -> {}", key, value);
}
}
try {
if (serverIsVersioned) {
if (isDebugEnabled) {
logger.debug("associating version tag with {} version={}", key, versionTag);
}
// If we have received a version tag from a server, add it to the event
tagHolder.setVersionTag(versionTag);
tagHolder.setFromServer(true);
} else if (retryVersions != null && retryVersions.containsKey(key)) {
// If this is a retried event, and we have a version tag for the retry,
// add it to the event.
tagHolder.setVersionTag(retryVersions.get(key));
}
if (!overwritten) {
basicEntryPutAll(key, value, putAllOp, offset, tagHolder);
}
// now we must check again since the cache may have closed during
// distribution (causing this process to not receive and queue the
// event for clients)
stopper.checkCancelInProgress(null);
succeeded.addKeyAndVersion(key, tagHolder.getVersionTag());
} catch (Exception ex) {
if (isDebugEnabled) {
logger.debug("PutAll operation encountered exception for key {}", key, ex);
}
partialKeys.saveFailedKey(key, ex);
}
offset++;
}
}
};
syncBulkOp(task, eventId);
if (partialKeys.hasFailure()) {
// Bug 51725: succeeded now contains an ordered key list but may be missing the version
// tags. Save a reference to succeeded in partialKeys; postPutAll() may modify
// succeeded to fill in the version tags.
partialKeys.setSucceededKeysAndVersions(succeeded);
logger.info(LocalizedMessage.create(LocalizedStrings.Region_PutAll_Applied_PartialKeys_0_1, new Object[] { getFullPath(), partialKeys }));
if (isDebugEnabled) {
logger.debug(partialKeys.detailString());
}
if (runtimeException == null) {
// if an exception was already received from the server, ignore the local exception
if (putAllOp.isBridgeOperation()) {
if (partialKeys.getFailure() instanceof CancelException) {
runtimeException = (RuntimeException) partialKeys.getFailure();
} else if (partialKeys.getFailure() instanceof LowMemoryException) {
// fix for #43589
throw partialKeys.getFailure();
} else {
runtimeException = new PutAllPartialResultException(partialKeys);
if (isDebugEnabled) {
logger.debug("basicPutAll:" + partialKeys.detailString());
}
}
} else {
throw partialKeys.getFailure();
}
}
}
} catch (LowMemoryException lme) {
throw lme;
} catch (RuntimeException ex) {
runtimeException = ex;
} catch (Exception ex) {
runtimeException = new RuntimeException(ex);
} finally {
putAllOp.getBaseEvent().release();
putAllOp.freeOffHeapResources();
}
getDataView().postPutAll(putAllOp, succeeded, this);
} finally {
unlockRVVForBulkOp();
}
if (runtimeException != null) {
throw runtimeException;
}
return succeeded;
}
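The cause-chain walk at the top of the catch block (scanning e.getFailure() for a TransactionException) is a reusable pattern. Below is a minimal standalone sketch of it; CauseChainUtil, findCause, and isCancellation are names invented here for illustration, not Geode source.

import org.apache.geode.CancelException;

// Walk a Throwable's cause chain and return the first cause of the
// requested type, or null if none is found.
public final class CauseChainUtil {

  private CauseChainUtil() {}

  public static <T extends Throwable> T findCause(Throwable root, Class<T> type) {
    for (Throwable t = root; t != null; t = t.getCause()) {
      if (type.isInstance(t)) {
        return type.cast(t);
      }
    }
    return null;
  }

  // CancelException is Geode's marker for "the cache or system is shutting down"
  public static boolean isCancellation(Throwable root) {
    return findCause(root, CancelException.class) != null;
  }
}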
use of org.apache.geode.CancelException in project geode by apache.
the class Oplog method recoverDrf.
/**
* Returns the number of bytes read.
*/
long recoverDrf(OplogEntryIdSet deletedIds, boolean alreadyRecoveredOnce, boolean latestOplog) {
File drfFile = this.drf.f;
if (drfFile == null) {
this.haveRecoveredDrf = true;
return 0L;
}
lockCompactor();
try {
if (this.haveRecoveredDrf && !getHasDeletes()) {
// this check is done while holding the compactor lock
return 0L;
}
if (!this.haveRecoveredDrf) {
this.haveRecoveredDrf = true;
}
logger.info(LocalizedMessage.create(LocalizedStrings.DiskRegion_RECOVERING_OPLOG_0_1_2, new Object[] { toString(), drfFile.getAbsolutePath(), getParent().getName() }));
this.recoverDelEntryId = DiskStoreImpl.INVALID_ID;
boolean readLastRecord = true;
CountingDataInputStream dis = null;
try {
int recordCount = 0;
boolean foundDiskStoreRecord = false;
FileInputStream fis = null;
try {
fis = new FileInputStream(drfFile);
dis = new CountingDataInputStream(new BufferedInputStream(fis, 32 * 1024), drfFile.length());
boolean endOfLog = false;
while (!endOfLog) {
if (dis.atEndOfFile()) {
endOfLog = true;
break;
}
readLastRecord = false;
byte opCode = dis.readByte();
if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
logger.trace(LogMarker.PERSIST_RECOVERY, "drf byte={} location={}", opCode, Long.toHexString(dis.getCount()));
}
switch(opCode) {
case OPLOG_EOF_ID:
// we are at the end of the oplog, so back up one byte
dis.decrementCount();
endOfLog = true;
break;
case OPLOG_DEL_ENTRY_1ID:
case OPLOG_DEL_ENTRY_2ID:
case OPLOG_DEL_ENTRY_3ID:
case OPLOG_DEL_ENTRY_4ID:
case OPLOG_DEL_ENTRY_5ID:
case OPLOG_DEL_ENTRY_6ID:
case OPLOG_DEL_ENTRY_7ID:
case OPLOG_DEL_ENTRY_8ID:
readDelEntry(dis, opCode, deletedIds, parent);
recordCount++;
break;
case OPLOG_DISK_STORE_ID:
readDiskStoreRecord(dis, this.drf.f);
foundDiskStoreRecord = true;
recordCount++;
break;
case OPLOG_MAGIC_SEQ_ID:
readOplogMagicSeqRecord(dis, this.drf.f, OPLOG_TYPE.DRF);
break;
case OPLOG_GEMFIRE_VERSION:
readGemfireVersionRecord(dis, this.drf.f);
recordCount++;
break;
case OPLOG_RVV:
long idx = dis.getCount();
readRVVRecord(dis, this.drf.f, true, latestOplog);
recordCount++;
break;
default:
throw new DiskAccessException(LocalizedStrings.Oplog_UNKNOWN_OPCODE_0_FOUND_IN_DISK_OPERATION_LOG.toLocalizedString(opCode), getParent());
}
readLastRecord = true;
// @todo
// if (rgn.isDestroyed()) {
// break;
// }
} // end while
} finally {
if (dis != null) {
dis.close();
}
if (fis != null) {
fis.close();
}
}
if (!foundDiskStoreRecord && recordCount > 0) {
throw new DiskAccessException("The oplog file \"" + this.drf.f + "\" does not belong to the init file \"" + getParent().getInitFile() + "\". Drf did not contain a disk store id.", getParent());
}
} catch (EOFException ignore) {
// ignore since a partial record write can be caused by a crash
} catch (IOException ex) {
getParent().getCancelCriterion().checkCancelInProgress(ex);
throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0.toLocalizedString(drfFile.getPath()), ex, getParent());
} catch (CancelException e) {
if (logger.isDebugEnabled()) {
logger.debug("Oplog::readOplog:Error in recovery as Cache was closed", e);
}
} catch (RegionDestroyedException e) {
if (logger.isDebugEnabled()) {
logger.debug("Oplog::readOplog:Error in recovery as Region was destroyed", e);
}
} catch (IllegalStateException e) {
throw e;
}
// Add the Oplog size to the Directory Holder which owns this oplog,
// so that available space is correctly calculated & stats updated.
long byteCount = 0;
if (!readLastRecord) {
// this means that there was a crash
// and hence we should not continue to read
// the next oplog
this.crashed = true;
if (dis != null) {
byteCount = dis.getFileLength();
}
} else {
if (dis != null) {
byteCount = dis.getCount();
}
}
if (!alreadyRecoveredOnce) {
setRecoveredDrfSize(byteCount);
this.dirHolder.incrementTotalOplogSize(byteCount);
}
return byteCount;
} finally {
unlockCompactor();
}
}
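The read loop in recoverDrf follows a common crash-tolerant log-scan shape: read an opcode byte, dispatch on it, and treat EOFException as an expected partial record from a crash rather than an error. A minimal self-contained sketch under that assumption; the opcodes and the LogScanSketch class are hypothetical, not the real Oplog format.

import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;

public final class LogScanSketch {

  // hypothetical opcodes for this sketch only
  private static final byte OP_EOF = 0;
  private static final byte OP_DELETE = 1;

  // Scan the log, counting delete records, and stop cleanly at either an
  // explicit end-of-log marker or a truncated (crashed) final record.
  public static long scan(File logFile) throws IOException {
    long records = 0;
    try (DataInputStream dis = new DataInputStream(
        new BufferedInputStream(new FileInputStream(logFile), 32 * 1024))) {
      while (true) {
        byte opCode = dis.readByte(); // throws EOFException at end of stream
        if (opCode == OP_EOF) {
          break; // explicit end-of-log marker
        } else if (opCode == OP_DELETE) {
          dis.readLong(); // consume the deleted entry id
          records++;
        } else {
          throw new IOException("unknown opcode " + opCode);
        }
      }
    } catch (EOFException ignore) {
      // a partial record caused by a crash; recovery simply stops here
    }
    return records;
  }
}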
use of org.apache.geode.CancelException in project geode by apache.
the class LocalRegion method recursiveDestroyRegion.
/**
* Removes entries and recursively destroys subregions.
*
* @param eventSet collects the events for all destroyed regions; if null, then we're closing,
* so don't send events to callbacks or destroy the disk region
*/
private void recursiveDestroyRegion(Set eventSet, RegionEventImpl regionEvent, boolean cacheWrite) throws CacheWriterException, TimeoutException {
final boolean isClose = regionEvent.getOperation().isClose();
// do the cacheWriter beforeRegionDestroy first to fix bug 47736
if (eventSet != null && cacheWrite) {
try {
cacheWriteBeforeRegionDestroy(regionEvent);
} catch (CancelException e) {
// I don't think this should ever happen: bulletproofing for bug 39454
if (!this.cache.forcedDisconnect()) {
logger.warn(LocalizedMessage.create(LocalizedStrings.LocalRegion_RECURSIVEDESTROYREGION_PROBLEM_IN_CACHEWRITEBEFOREREGIONDESTROY), e);
}
}
}
if (this.eventTracker != null) {
this.eventTracker.stop();
}
if (logger.isTraceEnabled(LogMarker.RVV) && getVersionVector() != null) {
logger.trace(LogMarker.RVV, "version vector for {} is {}", getName(), getVersionVector().fullToString());
}
cancelTTLExpiryTask();
cancelIdleExpiryTask();
cancelAllEntryExpiryTasks();
if (!isInternalRegion()) {
getCachePerfStats().incRegions(-1);
}
this.cache.getInternalResourceManager(false).removeResourceListener(this);
if (getMembershipAttributes().hasRequiredRoles()) {
if (!isInternalRegion()) {
getCachePerfStats().incReliableRegions(-1);
}
}
// add this region's event; the add only needs to be done if hasListener || hasAdminListener
if (eventSet != null) {
eventSet.add(regionEvent);
}
try {
// call recursiveDestroyRegion on each subregion and remove it
// from this subregion map
Collection values = this.subregions.values();
for (Iterator itr = values.iterator(); itr.hasNext(); ) {
// element is a LocalRegion
Object element = itr.next();
LocalRegion region;
try {
LocalRegion.setThreadInitLevelRequirement(LocalRegion.BEFORE_INITIAL_IMAGE);
try {
// converts to a LocalRegion
region = toRegion(element);
} finally {
LocalRegion.setThreadInitLevelRequirement(LocalRegion.AFTER_INITIAL_IMAGE);
}
} catch (CancelException ignore) {
// ignore, keep going through the motions though
region = (LocalRegion) element;
} catch (RegionDestroyedException ignore) {
// SharedRegionData was destroyed
continue;
}
// skip subregions already destroyed (e.g., a failed initialization removing them from the parent subregion map)
if (region.isDestroyed) {
continue;
}
// BEGIN operating on subregion of this region (rgn)
if (eventSet != null) {
regionEvent = (RegionEventImpl) regionEvent.clone();
regionEvent.region = region;
}
try {
region.recursiveDestroyRegion(eventSet, regionEvent, cacheWrite);
if (!region.isInternalRegion()) {
InternalDistributedSystem system = region.cache.getInternalDistributedSystem();
system.handleResourceEvent(ResourceEvent.REGION_REMOVE, region);
}
} catch (CancelException e) {
// I don't think this should ever happen: bulletproofing for bug 39454
if (!this.cache.forcedDisconnect()) {
logger.warn(LocalizedMessage.create(LocalizedStrings.LocalRegion_RECURSIVEDESTROYREGION_RECURSION_FAILED_DUE_TO_CACHE_CLOSURE_REGION_0, region.getFullPath()), e);
}
}
// remove from this subregion map;
itr.remove();
// END operating on subregion of this region
}
try {
if (this.indexManager != null) {
try {
if (this instanceof BucketRegion) {
this.indexManager.removeBucketIndexes(getPartitionedRegion());
}
this.indexManager.destroy();
} catch (QueryException e) {
throw new IndexMaintenanceException(e);
}
}
} catch (CancelException e) {
// I don't think this should ever happen: bulletproofing for bug 39454
if (!this.cache.forcedDisconnect()) {
logger.warn(LocalizedMessage.create(LocalizedStrings.LocalRegion_BASICDESTROYREGION_INDEX_REMOVAL_FAILED_DUE_TO_CACHE_CLOSURE_REGION_0, getFullPath()), e);
}
}
} finally {
// mark this region as destroyed.
if (regionEvent.isReinitializing()) {
this.reinitialized_old = true;
}
this.cache.setRegionByPath(getFullPath(), null);
if (this.eventTracker != null) {
this.eventTracker.stop();
}
if (this.diskRegion != null) {
this.diskRegion.prepareForClose(this);
}
this.isDestroyed = true;
// after isDestroyed is set to true call removeResourceListener to fix bug 49555
this.cache.getInternalResourceManager(false).removeResourceListener(this);
closeEntries();
if (logger.isDebugEnabled()) {
logger.debug("recursiveDestroyRegion: Region Destroyed: {}", getFullPath());
}
// the listener will be closed after the destroy event is delivered
try {
postDestroyRegion(!isClose, regionEvent);
} catch (CancelException e) {
logger.warn(LocalizedMessage.create(LocalizedStrings.LocalRegion_RECURSIVEDESTROYREGION_POSTDESTROYREGION_FAILED_DUE_TO_CACHE_CLOSURE_REGION_0, getFullPath()), e);
}
// fix for bug #47061
if (getServerProxy() == null) {
closeCqs();
}
detachPool();
if (eventSet != null) {
closeCallbacksExceptListener();
} else {
closeAllCallbacks();
}
if (this.concurrencyChecksEnabled && this.dataPolicy.withReplication() && !this.cache.isClosed()) {
this.cache.getTombstoneService().unscheduleTombstones(this);
}
if (this.hasOwnStats) {
this.cachePerfStats.close();
}
}
}
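A pattern worth noting above: every CancelException raised during teardown is caught, optionally logged, and swallowed so that cleanup of the remaining regions still runs. A minimal sketch of that warn-and-continue recursion; Node and TeardownSketch are stand-ins invented here, not Geode types.

import org.apache.geode.CancelException;

final class TeardownSketch {

  interface Node {
    Iterable<Node> children();

    void close(); // may throw CancelException (unchecked) if the cache is closing
  }

  // Destroy children depth-first; a cancellation in one child must not
  // prevent the siblings (or this node's own cleanup) from running.
  static void destroyRecursively(Node node) {
    for (Node child : node.children()) {
      destroyRecursively(child);
    }
    try {
      node.close();
    } catch (CancelException e) {
      // mirror recursiveDestroyRegion: warn and keep going
      System.err.println("close failed during shutdown: " + e);
    }
  }
}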
use of org.apache.geode.CancelException in project geode by apache.
the class FunctionStreamingResultCollector method waitForCacheOrFunctionException.
/**
* Waits for the response from the recipient
*
* @throws CacheException if the recipient threw a cache exception during message processing
* @throws ForceReattemptException if the recipient left the distributed system before the
* response was received.
* @throws RegionDestroyedException if the peer has closed its copy of the region
*/
public boolean waitForCacheOrFunctionException(long timeout) throws CacheException, ForceReattemptException {
boolean timedOut = false;
try {
if (timeout == 0) {
waitForRepliesUninterruptibly();
timedOut = true;
} else {
timedOut = waitForRepliesUninterruptibly(timeout);
}
} catch (ReplyException e) {
removeMember(e.getSender(), true);
Throwable t = e.getCause();
if (t instanceof CacheException) {
throw (CacheException) t;
} else if (t instanceof RegionDestroyedException) {
throw (RegionDestroyedException) t;
} else if (t instanceof ForceReattemptException) {
throw new ForceReattemptException("Peer requests reattempt", t);
} else if (t instanceof PrimaryBucketException) {
throw new PrimaryBucketException("Peer failed primary test", t);
}
if (t instanceof CancelException) {
this.execution.failedNodes.add(e.getSender().getId());
String msg = "PartitionResponse got remote CacheClosedException, throwing PartitionedRegionCommunicationException";
logger.debug("{}, throwing ForceReattemptException", msg, t);
throw (CancelException) t;
}
if (e.getCause() instanceof FunctionException) {
throw (FunctionException) e.getCause();
}
e.handleAsUnexpected();
}
return timedOut;
}
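The heart of this method is unwrapping the ReplyException's cause and rethrowing the most specific type the caller declared. A minimal sketch of that unwrap-and-rethrow pattern; ReplyFailure is a stand-in invented here for ReplyException.

import org.apache.geode.CancelException;
import org.apache.geode.cache.CacheException;

final class UnwrapSketch {

  // stand-in for ReplyException: a wrapper around the remote failure
  static class ReplyFailure extends Exception {
    ReplyFailure(Throwable cause) {
      super(cause);
    }
  }

  static void rethrowCause(ReplyFailure e) throws CacheException {
    Throwable t = e.getCause();
    if (t instanceof CacheException) {
      throw (CacheException) t; // recipient threw a cache exception while processing
    }
    if (t instanceof CancelException) {
      throw (CancelException) t; // recipient's cache closed mid-operation; unchecked
    }
    // anything else is unexpected: wrap and rethrow
    throw new RuntimeException("unexpected reply failure", e);
  }
}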
use of org.apache.geode.CancelException in project geode by apache.
the class AbstractGatewaySender method waitUntilFlushed.
public boolean waitUntilFlushed(long timeout, TimeUnit unit) throws InterruptedException {
boolean result = false;
if (isParallel()) {
try {
WaitUntilParallelGatewaySenderFlushedCoordinator coordinator = new WaitUntilParallelGatewaySenderFlushedCoordinator(this, timeout, unit, true);
result = coordinator.waitUntilFlushed();
} catch (BucketMovedException | CancelException | RegionDestroyedException e) {
logger.warn(LocalizedStrings.AbstractGatewaySender_CAUGHT_EXCEPTION_ATTEMPTING_WAIT_UNTIL_FLUSHED_RETRYING.toLocalizedString(), e);
throw e;
} catch (Throwable t) {
logger.warn(LocalizedStrings.AbstractGatewaySender_CAUGHT_EXCEPTION_ATTEMPTING_WAIT_UNTIL_FLUSHED_RETURNING.toLocalizedString(), t);
throw new InternalGemFireError(t);
}
return result;
} else {
// Serial senders are currently not supported
throw new UnsupportedOperationException(LocalizedStrings.AbstractGatewaySender_WAIT_UNTIL_FLUSHED_NOT_SUPPORTED_FOR_SERIAL_SENDERS.toLocalizedString());
}
}
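waitUntilFlushed splits failures into two classes: known-retriable ones (BucketMovedException, CancelException, RegionDestroyedException) are logged and rethrown so the caller can retry, while anything else is wrapped as a fatal internal error. A minimal sketch of that classification; Flusher and FlushWaitSketch are hypothetical names invented here.

import java.util.concurrent.TimeUnit;

import org.apache.geode.CancelException;

final class FlushWaitSketch {

  interface Flusher {
    boolean waitUntilFlushed(long timeout, TimeUnit unit) throws InterruptedException;
  }

  static boolean waitOrFail(Flusher flusher, long timeout, TimeUnit unit)
      throws InterruptedException {
    try {
      return flusher.waitUntilFlushed(timeout, unit);
    } catch (CancelException e) {
      // known-retriable: the cache is closing; propagate so the caller can retry
      throw e;
    } catch (RuntimeException e) {
      // anything else is unexpected and treated as fatal
      throw new IllegalStateException("flush failed unexpectedly", e);
    }
  }
}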