use of org.apache.geode.CancelException in project geode by apache.
the class SerialAsyncEventQueueImpl method stop.
@Override
public void stop() {
if (logger.isDebugEnabled()) {
logger.debug("Stopping Gateway Sender : {}", this);
}
this.getLifeCycleLock().writeLock().lock();
try {
// Stop the dispatcher
AbstractGatewaySenderEventProcessor ev = this.eventProcessor;
if (ev != null && !ev.isStopped()) {
ev.stopProcessing();
}
// Stop the proxy (after the dispatcher, so the socket is still
// alive until after the dispatcher has stopped)
stompProxyDead();
// Close the listeners
for (AsyncEventListener listener : this.listeners) {
listener.close();
}
logger.info(LocalizedMessage.create(LocalizedStrings.GatewayImpl_STOPPED__0, this));
clearTempEventsAfterSenderStopped();
} finally {
this.getLifeCycleLock().writeLock().unlock();
}
if (this.isPrimary()) {
try {
DistributedLockService.destroy(getSenderAdvisor().getDLockServiceName());
} catch (IllegalArgumentException e) {
// service not found... ignore
}
}
if (getQueues() != null && !getQueues().isEmpty()) {
for (RegionQueue q : getQueues()) {
((SerialGatewaySenderQueue) q).cleanUp();
}
}
this.setIsPrimary(false);
try {
new UpdateAttributesProcessor(this).distribute(false);
} catch (CancelException e) {
// the cache is closing; distributing the stop is best-effort, so ignore
}
Thread lockObtainingThread = getSenderAdvisor().getLockObtainingThread();
if (lockObtainingThread != null && lockObtainingThread.isAlive()) {
// wait a while for thread to terminate
try {
lockObtainingThread.join(3000);
} catch (InterruptedException ex) {
// we allowed our join to be canceled
// reset interrupt bit so this thread knows it has been interrupted
Thread.currentThread().interrupt();
}
if (lockObtainingThread.isAlive()) {
logger.info(LocalizedMessage.create(LocalizedStrings.GatewaySender_COULD_NOT_STOP_LOCK_OBTAINING_THREAD_DURING_GATEWAY_SENDER_STOP));
}
}
InternalDistributedSystem system = (InternalDistributedSystem) this.cache.getDistributedSystem();
system.handleResourceEvent(ResourceEvent.GATEWAYSENDER_STOP, this);
}
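The pattern above treats CancelException as a signal that the cache is already going down: the stop keeps running and the exception is swallowed. A minimal sketch of that best-effort shutdown style, with Step and stopAll as assumed names (only CancelException and CacheClosedException are real Geode classes):

import org.apache.geode.CancelException;
import org.apache.geode.cache.CacheClosedException;

public class BestEffortStop {
  // hypothetical shutdown step; may throw CancelException if the cache is closing
  interface Step {
    void run();
  }

  // run every step; a cancelled step is skipped, the remaining cleanup still executes
  static void stopAll(Step... steps) {
    for (Step step : steps) {
      try {
        step.run();
      } catch (CancelException ignored) {
        // the cache or distributed system is closing; keep going
      }
    }
  }

  public static void main(String[] args) {
    stopAll(
        () -> System.out.println("stop dispatcher"),
        () -> { throw new CacheClosedException("cache is closing"); },
        () -> System.out.println("close listeners"));
  }
}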
use of org.apache.geode.CancelException in project geode by apache.
the class GMSMembershipManager method directChannelSend.
/**
* Perform the grossness associated with sending a message over a DirectChannel
*
* @param destinations the list of destinations
* @param content the message
* @param theStats the statistics object to update
* @return all recipients who did not receive the message (null if all received it)
* @throws NotSerializableException if the message is not serializable
*/
protected Set<InternalDistributedMember> directChannelSend(InternalDistributedMember[] destinations, DistributionMessage content, DMStats theStats) throws NotSerializableException {
boolean allDestinations;
InternalDistributedMember[] keys;
if (content.forAll()) {
allDestinations = true;
latestViewReadLock.lock();
try {
List<InternalDistributedMember> keySet = latestView.getMembers();
keys = new InternalDistributedMember[keySet.size()];
keys = keySet.toArray(keys);
} finally {
latestViewReadLock.unlock();
}
} else {
allDestinations = false;
keys = destinations;
}
int sentBytes;
try {
sentBytes = directChannel.send(this, keys, content, this.services.getConfig().getDistributionConfig().getAckWaitThreshold(), this.services.getConfig().getDistributionConfig().getAckSevereAlertThreshold());
if (theStats != null) {
theStats.incSentBytes(sentBytes);
}
if (sentBytes == 0) {
if (services.getCancelCriterion().isCancelInProgress()) {
throw new DistributedSystemDisconnectedException();
}
}
} catch (DistributedSystemDisconnectedException ex) {
if (services.getShutdownCause() != null) {
throw new DistributedSystemDisconnectedException("DistributedSystem is shutting down", services.getShutdownCause());
} else {
// see bug 41416
throw ex;
}
} catch (ConnectExceptions ex) {
// Check if the connect exception is due to system shutting down.
if (shutdownInProgress()) {
if (services.getShutdownCause() != null) {
throw new DistributedSystemDisconnectedException("DistributedSystem is shutting down", services.getShutdownCause());
} else {
throw new DistributedSystemDisconnectedException();
}
}
if (allDestinations)
return null;
// We need to return this list of failures
List<InternalDistributedMember> members = (List<InternalDistributedMember>) ex.getMembers();
// SANITY CHECK: If we fail to send a message to an existing member
// of the view, we have a serious error (bug36202).
// grab a recent view, excluding shunned members
NetView view = services.getJoinLeave().getView();
// Iterate through members and causes in tandem :-(
Iterator it_mem = members.iterator();
Iterator it_causes = ex.getCauses().iterator();
while (it_mem.hasNext()) {
InternalDistributedMember member = (InternalDistributedMember) it_mem.next();
Throwable th = (Throwable) it_causes.next();
if (!view.contains(member) || (th instanceof ShunnedMemberException)) {
continue;
}
logger.fatal(LocalizedMessage.create(LocalizedStrings.GroupMembershipService_FAILED_TO_SEND_MESSAGE_0_TO_MEMBER_1_VIEW_2, new Object[] { content, member, view }), th);
// Assert.assertTrue(false, "messaging contract failure");
}
return new HashSet<>(members);
} // catch ConnectExceptions
catch (ToDataException | CancelException e) {
throw e;
} catch (IOException e) {
if (logger.isDebugEnabled()) {
logger.debug("Membership: directChannelSend caught exception: {}", e.getMessage(), e);
}
if (e instanceof NotSerializableException) {
throw (NotSerializableException) e;
}
} catch (RuntimeException | Error e) {
if (logger.isDebugEnabled()) {
logger.debug("Membership: directChannelSend caught exception: {}", e.getMessage(), e);
}
throw e;
}
return null;
}
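The catch ordering is the key point: ToDataException and CancelException are rethrown unchanged, shutdown-related failures are translated into DistributedSystemDisconnectedException, and only NotSerializableException escapes the IOException handler. A rough sketch of that translation, with Channel and sendOrTranslate as assumed names:

import java.io.IOException;
import java.io.NotSerializableException;
import org.apache.geode.CancelException;
import org.apache.geode.ToDataException;
import org.apache.geode.distributed.DistributedSystemDisconnectedException;

public class SendErrorTranslation {
  // hypothetical transport; may throw IOException (including NotSerializableException)
  interface Channel {
    void send(Object message) throws IOException;
  }

  static void sendOrTranslate(Channel channel, Object message, boolean shuttingDown)
      throws NotSerializableException {
    try {
      channel.send(message);
    } catch (ToDataException | CancelException e) {
      throw e; // serialization and cancellation problems propagate unchanged
    } catch (NotSerializableException e) {
      throw e; // surfaced separately so callers can report the bad message
    } catch (IOException e) {
      if (shuttingDown) {
        // mirror the membership manager: report a disconnect, not a low-level IO error
        throw new DistributedSystemDisconnectedException("DistributedSystem is shutting down", e);
      }
      // otherwise treat it as a transient send failure for the caller to log or retry
    }
  }
}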
use of org.apache.geode.CancelException in project geode by apache.
the class MemberFunctionStreamingMessage method process.
@Override
protected void process(final DistributionManager dm) {
Throwable thr = null;
ReplyException rex = null;
if (this.functionObject == null) {
rex = new ReplyException(new FunctionException(LocalizedStrings.ExecuteFunction_FUNCTION_NAMED_0_IS_NOT_REGISTERED.toLocalizedString(this.functionName)));
replyWithException(dm, rex);
return;
}
FunctionStats stats = FunctionStats.getFunctionStats(this.functionObject.getId(), dm.getSystem());
TXStateProxy tx = null;
try {
tx = prepForTransaction();
ResultSender resultSender = new MemberFunctionResultSender(dm, this, this.functionObject);
Set<Region> regions = new HashSet<Region>();
if (this.regionPathSet != null) {
InternalCache cache = GemFireCacheImpl.getInstance();
for (String regionPath : this.regionPathSet) {
if (checkCacheClosing(dm) || checkDSClosing(dm)) {
thr = new CacheClosedException(LocalizedStrings.PartitionMessage_REMOTE_CACHE_IS_CLOSED_0.toLocalizedString(dm.getId()));
return;
}
regions.add(cache.getRegion(regionPath));
}
}
FunctionContextImpl context = new MultiRegionFunctionContextImpl(this.functionObject.getId(), this.args, resultSender, regions, isReExecute);
long start = stats.startTime();
stats.startFunctionExecution(this.functionObject.hasResult());
if (logger.isDebugEnabled()) {
logger.debug("Executing Function: {} on remote member with context: {}", this.functionObject.getId(), context.toString());
}
this.functionObject.execute(context);
if (!this.replyLastMsg && this.functionObject.hasResult()) {
throw new FunctionException(LocalizedStrings.ExecuteFunction_THE_FUNCTION_0_DID_NOT_SENT_LAST_RESULT.toString(functionObject.getId()));
}
stats.endFunctionExecution(start, this.functionObject.hasResult());
} catch (FunctionException functionException) {
if (logger.isDebugEnabled()) {
logger.debug("FunctionException occurred on remote member while executing Function: {}", this.functionObject.getId(), functionException);
}
stats.endFunctionExecutionWithException(this.functionObject.hasResult());
rex = new ReplyException(functionException);
replyWithException(dm, rex);
// thr = functionException.getCause();
} catch (CancelException exception) {
// bug 37026: this is too noisy...
// throw new CacheClosedException("remote system shutting down");
// thr = se; cache is closed, no point trying to send a reply
thr = new FunctionInvocationTargetException(exception);
stats.endFunctionExecutionWithException(this.functionObject.hasResult());
rex = new ReplyException(thr);
replyWithException(dm, rex);
} catch (Exception exception) {
if (logger.isDebugEnabled()) {
logger.debug("Exception occurred on remote member while executing Function: {}", this.functionObject.getId(), exception);
}
stats.endFunctionExecutionWithException(this.functionObject.hasResult());
rex = new ReplyException(exception);
replyWithException(dm, rex);
// thr = e.getCause();
} catch (VirtualMachineError err) {
SystemFailure.initiateFailure(err);
// If this ever returns, rethrow the error. We're poisoned
// now, so don't let this thread continue.
throw err;
} catch (Throwable t) {
// Whenever you catch Error or Throwable, you must also
// catch VirtualMachineError (see above). However, there is
// _still_ a possibility that you are dealing with a cascading
// error condition, so you also need to check to see if the JVM
// is still usable:
SystemFailure.checkFailure();
thr = t;
} finally {
cleanupTransaction(tx);
if (thr != null) {
rex = new ReplyException(thr);
replyWithException(dm, rex);
}
}
}
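Here a CancelException during remote function execution is wrapped in a FunctionInvocationTargetException before being sent back inside the ReplyException, which lets the coordinator retry on another member instead of seeing an opaque shutdown error. A condensed sketch, with Reply and execute as assumed names:

import org.apache.geode.CancelException;
import org.apache.geode.cache.execute.FunctionException;
import org.apache.geode.cache.execute.FunctionInvocationTargetException;

public class FunctionExecutionSketch {
  // hypothetical reply channel back to the coordinating member
  interface Reply {
    void sendException(RuntimeException failure);
  }

  static void execute(Runnable functionBody, Reply reply) {
    try {
      functionBody.run();
    } catch (CancelException cacheClosing) {
      // the hosting cache is closing; report a retryable target failure
      reply.sendException(new FunctionInvocationTargetException(cacheClosing));
    } catch (FunctionException functionFailure) {
      // ordinary function failures are reported as-is
      reply.sendException(functionFailure);
    }
  }
}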
use of org.apache.geode.CancelException in project geode by apache.
the class LocalRegion method basicRemoveAll.
VersionedObjectList basicRemoveAll(final Collection<Object> keys, final DistributedRemoveAllOperation removeAllOp, final List<VersionTag> retryVersions) {
final boolean isDebugEnabled = logger.isDebugEnabled();
final boolean isTraceEnabled = logger.isTraceEnabled();
final EntryEventImpl event = removeAllOp.getBaseEvent();
EventID eventId = event.getEventId();
if (eventId == null && generateEventID()) {
// We need to "reserve" the eventIds for the entries in the map here
event.reserveNewEventId(this.cache.getDistributedSystem(), keys.size());
eventId = event.getEventId();
}
verifyRemoveAllKeys(keys);
VersionedObjectList proxyResult = null;
boolean partialResult = false;
RuntimeException runtimeException = null;
if (hasServerProxy()) {
// send message to bridge server
if (isTX()) {
TXStateProxyImpl txState = (TXStateProxyImpl) this.cache.getTxManager().getTXState();
txState.getRealDeal(null, this);
}
try {
proxyResult = getServerProxy().removeAll(keys, eventId, event.getCallbackArgument());
if (isDebugEnabled) {
logger.debug("removeAll received response from server: {}", proxyResult);
}
} catch (PutAllPartialResultException e) {
// adjust the map to only add succeeded entries, then apply the adjustedMap
proxyResult = e.getSucceededKeysAndVersions();
partialResult = true;
if (isDebugEnabled) {
logger.debug("removeAll in client encountered a BulkOpPartialResultException: {}{}. Adjusted keys are: {}", e.getMessage(), getLineSeparator(), proxyResult.getKeys());
}
Throwable txException = e.getFailure();
while (txException != null) {
if (txException instanceof TransactionException) {
runtimeException = (RuntimeException) txException;
break;
}
txException = txException.getCause();
}
if (runtimeException == null) {
runtimeException = new ServerOperationException(LocalizedStrings.Region_RemoveAll_Applied_PartialKeys_At_Server_0.toLocalizedString(getFullPath()), e.getFailure());
}
}
}
final VersionedObjectList succeeded = new VersionedObjectList(keys.size(), true, this.concurrencyChecksEnabled);
// If this is a transactional removeAll, we will not have version information as it is only
// generated at commit
// so treat transactional removeAll as if the server is not versioned.
// If we have no storage then act as if the server is not versioned.
final boolean serverIsVersioned = proxyResult != null && proxyResult.regionIsVersioned() && !isTX() && getDataPolicy().withStorage();
if (!serverIsVersioned && !partialResult) {
// since the server is not versioned and we do not have a partial result
// get rid of the proxyResult info returned by the server.
proxyResult = null;
}
lockRVVForBulkOp();
try {
try {
int size = proxyResult == null ? keys.size() : proxyResult.size();
if (isTraceEnabled) {
logger.trace("size of removeAll result is {} keys are {} proxyResult is {}", size, keys, proxyResult);
}
final PutAllPartialResult partialKeys = new PutAllPartialResult(size);
final Iterator iterator;
final boolean isVersionedResults;
if (proxyResult != null) {
iterator = proxyResult.iterator();
isVersionedResults = true;
} else {
iterator = keys.iterator();
isVersionedResults = false;
}
// TODO: refactor this mess
Runnable task = new Runnable() {
@Override
public void run() {
int offset = 0;
VersionTagHolder tagHolder = new VersionTagHolder();
while (iterator.hasNext()) {
stopper.checkCancelInProgress(null);
tagHolder.setVersionTag(null);
Object key;
VersionTag versionTag = null;
if (isVersionedResults) {
Map.Entry mapEntry = (Map.Entry) iterator.next();
key = mapEntry.getKey();
versionTag = ((VersionedObjectList.Entry) mapEntry).getVersionTag();
if (isDebugEnabled) {
logger.debug("removeAll key {} version={}", key, versionTag);
}
if (versionTag == null) {
if (isDebugEnabled) {
logger.debug("removeAll found invalid version tag, which means the entry is not found at server for key={}.", key);
}
succeeded.addKeyAndVersion(key, null);
continue;
}
// No need for special handling here in removeAll.
// We can just remove this key from the client with versionTag set to null.
} else {
key = iterator.next();
if (isTraceEnabled) {
logger.trace("removeAll {}", key);
}
}
try {
if (serverIsVersioned) {
if (isDebugEnabled) {
logger.debug("associating version tag with {} version={}", key, versionTag);
}
// If we have received a version tag from a server, add it to the event
tagHolder.setVersionTag(versionTag);
tagHolder.setFromServer(true);
} else if (retryVersions != null) {
VersionTag versionTag1 = retryVersions.get(offset);
if (versionTag1 != null) {
// If this is a retried event, and we have a version tag for the retry,
// add it to the event.
tagHolder.setVersionTag(versionTag1);
}
}
basicEntryRemoveAll(key, removeAllOp, offset, tagHolder);
// now we must check again since the cache may have closed during
// distribution causing this process to not receive and queue the
// event for clients
stopper.checkCancelInProgress(null);
succeeded.addKeyAndVersion(key, tagHolder.getVersionTag());
} catch (Exception ex) {
partialKeys.saveFailedKey(key, ex);
}
offset++;
}
}
};
syncBulkOp(task, eventId);
if (partialKeys.hasFailure()) {
// Bug 51725: Now succeeded contains an ordered key list, which may be missing the version tags.
// Save reference of succeeded into partialKeys. The succeeded may be modified by
// postRemoveAll() to fill in the version tags.
partialKeys.setSucceededKeysAndVersions(succeeded);
logger.info(LocalizedMessage.create(LocalizedStrings.Region_RemoveAll_Applied_PartialKeys_0_1, new Object[] { getFullPath(), partialKeys }));
if (isDebugEnabled) {
logger.debug(partialKeys.detailString());
}
if (runtimeException == null) {
// if received exception from server first, ignore local exception
if (removeAllOp.isBridgeOperation()) {
if (partialKeys.getFailure() instanceof CancelException) {
runtimeException = (RuntimeException) partialKeys.getFailure();
} else if (partialKeys.getFailure() instanceof LowMemoryException) {
// fix for #43589
throw partialKeys.getFailure();
} else {
runtimeException = new PutAllPartialResultException(partialKeys);
if (isDebugEnabled) {
logger.debug("basicRemoveAll: {}", partialKeys.detailString());
}
}
} else {
throw partialKeys.getFailure();
}
}
}
} catch (LowMemoryException lme) {
throw lme;
} catch (RuntimeException ex) {
runtimeException = ex;
} catch (Exception ex) {
runtimeException = new RuntimeException(ex);
} finally {
removeAllOp.getBaseEvent().release();
removeAllOp.freeOffHeapResources();
}
getDataView().postRemoveAll(removeAllOp, succeeded, this);
} finally {
unlockRVVForBulkOp();
}
if (runtimeException != null) {
throw runtimeException;
}
return succeeded;
}
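Two CancelException touch points stand out: stopper.checkCancelInProgress(null) runs around each per-key removal, and if the failure recorded for the partial result is itself a CancelException it is rethrown directly rather than wrapped in a PutAllPartialResultException. A simplified collect-then-check sketch, with KeyRemover and removeAll as assumed names:

import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.geode.CancelException;

public class BulkRemoveSketch {
  // hypothetical per-key removal; may throw CancelException if the cache closes mid-loop
  interface KeyRemover {
    void remove(Object key);
  }

  static Map<Object, Exception> removeAll(Iterable<Object> keys, KeyRemover remover) {
    Map<Object, Exception> failedKeys = new LinkedHashMap<>();
    for (Object key : keys) {
      try {
        remover.remove(key);
      } catch (Exception perKeyFailure) {
        failedKeys.put(key, perKeyFailure); // keep going; record which keys failed
      }
    }
    // a recorded cancellation means the whole cache is going away, not just these keys,
    // so surface it instead of reporting a partial result
    for (Exception failure : failedKeys.values()) {
      if (failure instanceof CancelException) {
        throw (CancelException) failure;
      }
    }
    return failedKeys;
  }
}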
use of org.apache.geode.CancelException in project geode by apache.
the class LocalRegion method basicDestroyRegion.
void basicDestroyRegion(RegionEventImpl event, boolean cacheWrite, boolean lock, boolean callbackEvents) throws CacheWriterException, TimeoutException {
preDestroyChecks();
final TXStateProxy tx = this.cache.getTXMgr().internalSuspend();
try {
boolean acquiredLock = false;
if (lock) {
try {
acquireDestroyLock();
acquiredLock = true;
} catch (CancelException ignore) {
if (logger.isDebugEnabled()) {
logger.debug("basicDestroyRegion: acquireDestroyLock failed due to cache closure, region = {}", getFullPath());
}
}
}
try {
// maintain destroy lock and TXStateInterface
// I moved checkRegionDestroyed up out of the following
// try block because it does not seem correct to deliver
// a destroy event to the clients if the region was already
// destroyed on the server.
checkRegionDestroyed(false);
// see bug 47736
boolean cancelledByCacheWriterException = false;
HashSet eventSet = null;
try {
// ensure that destroy events are dispatched
if (this instanceof PartitionedRegion && !((PartitionedRegion) this).getParallelGatewaySenderIds().isEmpty()) {
((PartitionedRegion) this).destroyParallelGatewaySenderRegion(event.getOperation(), cacheWrite, lock, callbackEvents);
}
if (this.parentRegion != null) {
// "Bubble up" the cache statistics to parent if this regions are more recent
this.parentRegion.updateStats();
}
try {
eventSet = callbackEvents ? new HashSet() : null;
this.destroyedSubregionSerialNumbers = collectSubregionSerialNumbers();
recursiveDestroyRegion(eventSet, event, cacheWrite);
} catch (CancelException e) {
// a serious problem.
if (!this.cache.forcedDisconnect()) {
logger.warn(LocalizedMessage.create(LocalizedStrings.LocalRegion_RECURSIVEDESTROYREGION_RECURSION_FAILED_DUE_TO_CACHE_CLOSURE_REGION_0, getFullPath()), e);
}
} catch (CacheWriterException cwe) {
cancelledByCacheWriterException = true;
throw cwe;
}
// at this point all subregions are destroyed and this region has been marked as destroyed
// and postDestroyRegion has been called for each region. The only detail left is
// unhooking this region from the parent subregion map, and sending listener events
Assert.assertTrue(this.isDestroyed);
// remove corresponding artifacts from the Management Layer
if (!isInternalRegion()) {
InternalDistributedSystem system = this.cache.getInternalDistributedSystem();
system.handleResourceEvent(ResourceEvent.REGION_REMOVE, this);
}
try {
LocalRegion parent = this.parentRegion;
if (parent == null) {
this.cache.removeRoot(this);
} else {
parent.subregions.remove(this.regionName, this);
}
} catch (CancelException e) {
// I don't think this should ever happen: bulletproofing for bug 39454
if (!this.cache.forcedDisconnect()) {
logger.warn(LocalizedMessage.create(LocalizedStrings.LocalRegion_BASICDESTROYREGION_PARENT_REMOVAL_FAILED_DUE_TO_CACHE_CLOSURE_REGION_0, getFullPath()), e);
}
}
} finally {
// ensure that destroy events are dispatched
if (!cancelledByCacheWriterException) {
// We only need to notify bridgeClients of the top level region destroy
// which it will take and do a localRegionDestroy.
// So we pass it event and NOT eventSet
event.setEventType(EnumListenerEvent.AFTER_REGION_DESTROY);
notifyBridgeClients(event);
}
// send the pending region destroy events even if an exception occurred, since some of the destroys happened.
if (eventSet != null && callbackEvents) {
try {
sendPendingRegionDestroyEvents(eventSet);
} catch (CancelException ignore) {
// ignore, we're mute.
}
}
}
} finally {
if (acquiredLock) {
try {
releaseDestroyLock();
} catch (CancelException ignore) {
// ignore
}
}
}
} finally {
this.cache.getTXMgr().internalResume(tx);
}
}
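Throughout basicDestroyRegion, CancelException is tolerated at each step so the teardown can finish, and it is only logged as a warning when the cache was not closed by a forced disconnect (a forced disconnect makes the cancellation expected). A small sketch of that step-wise tolerance, with runDestroyStep and forcedDisconnect as assumed names:

import java.util.function.Consumer;
import org.apache.geode.CancelException;

public class DestroyStepSketch {
  // run one destroy step; cancellation never aborts the overall teardown
  static void runDestroyStep(Runnable step, boolean forcedDisconnect, Consumer<Throwable> warn) {
    try {
      step.run();
    } catch (CancelException e) {
      if (!forcedDisconnect) {
        // unexpected cancellation: worth a warning, but keep destroying the region
        warn.accept(e);
      }
    }
  }
}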