Use of org.apache.geode.cache.Operation in project geode by apache.
From the class CqTimeTestListener, the method onEvent:
public void onEvent(CqEvent cqEvent) {
  this.totalEventCount++;
  long currentTime = System.currentTimeMillis();
  Operation baseOperation = cqEvent.getBaseOperation();
  Operation queryOperation = cqEvent.getQueryOperation();
  Object key = cqEvent.getKey();
  logger.info("### Got CQ Event ###; baseOp=" + baseOperation + ";queryOp=" + queryOperation);
  logger.info("Number of events for the CQ: " + this.cqName + " : " + this.totalEventCount + " Key : " + key);
  if (baseOperation.isUpdate()) {
    this.eventUpdateCount++;
    this.updates.add(key);
  } else if (baseOperation.isCreate()) {
    this.eventCreateCount++;
    this.creates.add(key);
  } else if (baseOperation.isDestroy()) {
    this.eventDeleteCount++;
    this.destroys.add(key);
  } else if (baseOperation.isInvalidate()) {
    this.eventDeleteCount++;
    this.invalidates.add(key);
  }
  if (queryOperation.isUpdate()) {
    this.eventQueryUpdateCount++;
    long createTime = ((Portfolio) cqEvent.getNewValue()).getCreateTime();
    this.eventQueryUpdateTime += (currentTime - createTime);
  } else if (queryOperation.isCreate()) {
    this.eventQueryInsertCount++;
    long createTime = ((Portfolio) cqEvent.getNewValue()).getCreateTime();
    this.eventQueryInsertTime += (currentTime - createTime);
  } else if (queryOperation.isDestroy()) {
    this.eventQueryDeleteCount++;
  } else if (queryOperation.isInvalidate()) {
    this.eventQueryInvalidateCount++;
  }
}
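For orientation, the excerpt above shows only the callback side. A minimal sketch of how a CqListener like this might be attached to a continuous query follows; it is not the project's own test harness, and the locator address, CQ name, region name, and query string are illustrative assumptions.

import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.query.CqAttributesFactory;
import org.apache.geode.cache.query.CqEvent;
import org.apache.geode.cache.query.CqListener;
import org.apache.geode.cache.query.CqQuery;
import org.apache.geode.cache.query.QueryService;

public final class CqWiringSketch {

  // Builds, registers, and executes a CQ whose events go to the given listener
  // (for example an instance of CqTimeTestListener).
  static CqQuery registerCq(ClientCache cache, CqListener listener) throws Exception {
    QueryService queryService = cache.getQueryService();
    CqAttributesFactory factory = new CqAttributesFactory();
    factory.addCqListener(listener);
    CqQuery cq = queryService.newCq("portfolioCq",
        "SELECT * FROM /portfolios p WHERE p.ID > 0", factory.create());
    cq.execute(); // from this point the listener's onEvent(CqEvent) is invoked
    return cq;
  }

  public static void main(String[] args) throws Exception {
    // Subscription must be enabled on the client pool for CQ event delivery.
    ClientCache cache = new ClientCacheFactory()
        .addPoolLocator("localhost", 10334)
        .setPoolSubscriptionEnabled(true)
        .create();
    CqListener listener = new CqListener() {
      @Override
      public void onEvent(CqEvent cqEvent) {
        System.out.println("baseOp=" + cqEvent.getBaseOperation()
            + " queryOp=" + cqEvent.getQueryOperation() + " key=" + cqEvent.getKey());
      }

      @Override
      public void onError(CqEvent cqEvent) {
        System.out.println("CQ error: " + cqEvent.getThrowable());
      }

      @Override
      public void close() {
        // nothing to release in this sketch
      }
    };
    CqQuery cq = registerCq(cache, listener);
    // ... run workload ...
    cq.close();
    cache.close();
  }
}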
Use of org.apache.geode.cache.Operation in project geode by apache.
From the class CqQueryTestListener, the method onEvent:
public void onEvent(CqEvent cqEvent) {
  this.totalEventCount++;
  Operation baseOperation = cqEvent.getBaseOperation();
  Operation queryOperation = cqEvent.getQueryOperation();
  Object key = cqEvent.getKey();
  if (key != null) {
    events.add(key);
    cqEvents.add(cqEvent);
  }
  if (baseOperation.isUpdate()) {
    this.eventUpdateCount++;
    this.updates.add(key);
  } else if (baseOperation.isCreate()) {
    this.eventCreateCount++;
    this.creates.add(key);
  } else if (baseOperation.isDestroy()) {
    this.eventDeleteCount++;
    this.destroys.add(key);
  } else if (baseOperation.isInvalidate()) {
    this.eventDeleteCount++;
    this.invalidates.add(key);
  }
  if (queryOperation.isUpdate()) {
    this.eventQueryUpdateCount++;
  } else if (queryOperation.isCreate()) {
    this.eventQueryInsertCount++;
  } else if (queryOperation.isDestroy()) {
    this.eventQueryDeleteCount++;
  } else if (queryOperation.isInvalidate()) {
    this.eventQueryInvalidateCount++;
  } else if (queryOperation.isClear()) {
    this.eventRegionClear = true;
  } else if (queryOperation.isRegionInvalidate()) {
    this.eventRegionInvalidate = true;
  }
}
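Both test listeners rely on the same family of Operation classification predicates. The standalone sketch below (not part of the Geode test suite) simply prints how a handful of Operation constants answer those predicates; the constants chosen are an arbitrary sample.

import org.apache.geode.cache.Operation;

public final class OperationPredicateSketch {

  public static void main(String[] args) {
    Operation[] samples = { Operation.CREATE, Operation.UPDATE, Operation.DESTROY,
        Operation.INVALIDATE, Operation.REGION_CLEAR, Operation.REGION_INVALIDATE };
    for (Operation op : samples) {
      // The same predicates the listeners above use to bucket CQ events.
      System.out.println(op
          + " create=" + op.isCreate()
          + " update=" + op.isUpdate()
          + " destroy=" + op.isDestroy()
          + " invalidate=" + op.isInvalidate()
          + " clear=" + op.isClear()
          + " regionInvalidate=" + op.isRegionInvalidate());
    }
  }
}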
Use of org.apache.geode.cache.Operation in project geode by apache.
From the class GemFireCacheImpl, the method close:
public void close(String reason, Throwable systemFailureCause, boolean keepAlive, boolean keepDS) {
  this.securityService.close();
  if (isClosed()) {
    return;
  }
  final boolean isDebugEnabled = logger.isDebugEnabled();
  synchronized (GemFireCacheImpl.class) {
    // static synchronization is necessary due to static resources
    if (isClosed()) {
      return;
    }
    /*
     * First close the ManagementService as it uses a lot of infra which will be closed by
     * cache.close()
     */
    this.system.handleResourceEvent(ResourceEvent.CACHE_REMOVE, this);
    if (this.resourceEventsListener != null) {
      this.system.removeResourceListener(this.resourceEventsListener);
      this.resourceEventsListener = null;
    }
    if (systemFailureCause != null) {
      this.forcedDisconnect = systemFailureCause instanceof ForcedDisconnectException;
      if (this.forcedDisconnect) {
        this.disconnectCause = new ForcedDisconnectException(reason);
      } else {
        this.disconnectCause = systemFailureCause;
      }
    }
    this.keepAlive = keepAlive;
    this.isClosing = true;
    logger.info(LocalizedMessage.create(LocalizedStrings.GemFireCache_0_NOW_CLOSING, this));
    // available to anyone "fishing" for a cache...
    if (GemFireCacheImpl.instance == this) {
      GemFireCacheImpl.instance = null;
    }
    // threads may be hung trying to communicate with the map locked
    if (systemFailureCause == null) {
      PartitionedRegion.clearPRIdMap();
    }
    TXStateProxy tx = null;
    try {
      if (this.transactionManager != null) {
        tx = this.transactionManager.internalSuspend();
      }
      // do this before closing regions
      this.resourceManager.close();
      try {
        this.resourceAdvisor.close();
      } catch (CancelException ignore) {
        // ignore
      }
      try {
        this.jmxAdvisor.close();
      } catch (CancelException ignore) {
        // ignore
      }
      for (GatewaySender sender : this.allGatewaySenders) {
        try {
          sender.stop();
          GatewaySenderAdvisor advisor = ((AbstractGatewaySender) sender).getSenderAdvisor();
          if (advisor != null) {
            if (isDebugEnabled) {
              logger.debug("Stopping the GatewaySender advisor");
            }
            advisor.close();
          }
        } catch (CancelException ignore) {
        }
      }
      destroyGatewaySenderLockService();
      if (this.eventThreadPool != null) {
        if (isDebugEnabled) {
          logger.debug("{}: stopping event thread pool...", this);
        }
        this.eventThreadPool.shutdown();
      }
      /*
       * IMPORTANT: any operation during shut down that can time out (create a CancelException)
       * must be inside of this try block. If all else fails, we *must* ensure that the cache gets
       * closed!
       */
      try {
        this.stopServers();
        stopMemcachedServer();
        stopRedisServer();
        stopRestAgentServer();
        // cacheServers or gatewayHubs
        if (this.partitionedRegions != null) {
          if (isDebugEnabled) {
            logger.debug("{}: clearing partitioned regions...", this);
          }
          synchronized (this.partitionedRegions) {
            int prSize = -this.partitionedRegions.size();
            this.partitionedRegions.clear();
            getCachePerfStats().incPartitionedRegions(prSize);
          }
        }
        prepareDiskStoresForClose();
        if (GemFireCacheImpl.pdxInstance == this) {
          GemFireCacheImpl.pdxInstance = null;
        }
        List<LocalRegion> rootRegionValues;
        synchronized (this.rootRegions) {
          rootRegionValues = new ArrayList<>(this.rootRegions.values());
        }
        {
          final Operation op;
          if (this.forcedDisconnect) {
            op = Operation.FORCED_DISCONNECT;
          } else if (isReconnecting()) {
            op = Operation.CACHE_RECONNECT;
          } else {
            op = Operation.CACHE_CLOSE;
          }
          LocalRegion prRoot = null;
          for (LocalRegion lr : rootRegionValues) {
            if (isDebugEnabled) {
              logger.debug("{}: processing region {}", this, lr.getFullPath());
            }
            if (PartitionedRegionHelper.PR_ROOT_REGION_NAME.equals(lr.getName())) {
              prRoot = lr;
            } else {
              if (lr.getName().contains(ParallelGatewaySenderQueue.QSTRING)) {
                // this region will be closed internally by parent region
                continue;
              }
              if (isDebugEnabled) {
                logger.debug("{}: closing region {}...", this, lr.getFullPath());
              }
              try {
                lr.handleCacheClose(op);
              } catch (RuntimeException e) {
                if (isDebugEnabled || !this.forcedDisconnect) {
                  logger.warn(LocalizedMessage.create(LocalizedStrings.GemFireCache_0_ERROR_CLOSING_REGION_1, new Object[] { this, lr.getFullPath() }), e);
                }
              }
            }
          }
          try {
            if (isDebugEnabled) {
              logger.debug("{}: finishing partitioned region close...", this);
            }
            PartitionedRegion.afterRegionsClosedByCacheClose(this);
            if (prRoot != null) {
              // do the PR meta root region last
              prRoot.handleCacheClose(op);
            }
          } catch (CancelException e) {
            logger.warn(LocalizedMessage.create(LocalizedStrings.GemFireCache_0_ERROR_IN_LAST_STAGE_OF_PARTITIONEDREGION_CACHE_CLOSE, this), e);
          }
          destroyPartitionedRegionLockService();
        }
        closeDiskStores();
        this.diskMonitor.close();
        // Close the CqService Handle.
        try {
          if (isDebugEnabled) {
            logger.debug("{}: closing CQ service...", this);
          }
          this.cqService.close();
        } catch (RuntimeException ignore) {
          logger.info(LocalizedMessage.create(LocalizedStrings.GemFireCache_FAILED_TO_GET_THE_CQSERVICE_TO_CLOSE_DURING_CACHE_CLOSE_1));
        }
        PoolManager.close(keepAlive);
        if (isDebugEnabled) {
          logger.debug("{}: notifying admins of close...", this);
        }
        try {
          SystemMemberCacheEventProcessor.send(this, Operation.CACHE_CLOSE);
        } catch (CancelException ignore) {
          if (logger.isDebugEnabled()) {
            logger.debug("Ignored cancellation while notifying admins");
          }
        }
        if (isDebugEnabled) {
          logger.debug("{}: stopping destroyed entries processor...", this);
        }
        this.tombstoneService.stop();
        // NOTICE: the CloseCache message is the *last* message you can send!
        DM distributionManager = null;
        try {
          distributionManager = this.system.getDistributionManager();
          distributionManager.removeMembershipListener(this.transactionManager);
        } catch (CancelException ignore) {
          // distributionManager = null;
        }
        if (distributionManager != null) {
          // Send CacheClosedMessage (and NOTHING ELSE) here
          if (isDebugEnabled) {
            logger.debug("{}: sending CloseCache to peers...", this);
          }
          Set otherMembers = distributionManager.getOtherDistributionManagerIds();
          ReplyProcessor21 processor = new ReplyProcessor21(this.system, otherMembers);
          CloseCacheMessage msg = new CloseCacheMessage();
          msg.setRecipients(otherMembers);
          msg.setProcessorId(processor.getProcessorId());
          distributionManager.putOutgoing(msg);
          try {
            processor.waitForReplies();
          } catch (InterruptedException ignore) {
            // Thread.currentThread().interrupt(); // TODO ??? should we reset this bit later?
            // Keep going, make best effort to shut down.
          } catch (ReplyException ignore) {
            // keep going
          }
          // set closed state after telling others and getting responses
          // to avoid complications with others still in the process of
          // sending messages
        }
        // NO MORE Distributed Messaging AFTER THIS POINT!!!!
        ClientMetadataService cms = this.clientMetadataService;
        if (cms != null) {
          cms.close();
        }
        closeHeapEvictor();
        closeOffHeapEvictor();
      } catch (CancelException ignore) {
        // make sure the disk stores get closed
        closeDiskStores();
        // NO DISTRIBUTED MESSAGING CAN BE DONE HERE!
      }
      // Close the CqService Handle.
      try {
        this.cqService.close();
      } catch (RuntimeException ignore) {
        logger.info(LocalizedMessage.create(LocalizedStrings.GemFireCache_FAILED_TO_GET_THE_CQSERVICE_TO_CLOSE_DURING_CACHE_CLOSE_2));
      }
      this.cachePerfStats.close();
      TXLockService.destroyServices();
      EventTracker.stopTrackerServices(this);
      synchronized (this.ccpTimerMutex) {
        if (this.ccpTimer != null) {
          this.ccpTimer.cancel();
        }
      }
      this.expirationScheduler.cancel();
      // Stop QueryMonitor if running.
      if (this.queryMonitor != null) {
        this.queryMonitor.stopMonitoring();
      }
      stopDiskStoreTaskPool();
    } finally {
      // NO DISTRIBUTED MESSAGING CAN BE DONE HERE!
      if (this.transactionManager != null) {
        this.transactionManager.close();
      }
      ((DynamicRegionFactoryImpl) DynamicRegionFactory.get()).close();
      if (this.transactionManager != null) {
        this.transactionManager.internalResume(tx);
      }
      TXCommitMessage.getTracker().clearForCacheClose();
    }
    // Added to close the TransactionManager's cleanup thread
    TransactionManagerImpl.refresh();
    if (!keepDS) {
      // keepDS is used by ShutdownAll. It will override DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE
      if (!this.DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE) {
        this.system.disconnect();
      }
    }
    TypeRegistry.close();
    // do this late to prevent 43412
    TypeRegistry.setPdxSerializer(null);
    for (CacheLifecycleListener listener : cacheLifecycleListeners) {
      listener.cacheClosed(this);
    }
    // Fix for #49856
    SequenceLoggerImpl.signalCacheClose();
    SystemFailure.signalCacheClose();
  }
  // static synchronization on GemFireCache.class
}
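The block above picks one of three Operation constants to describe why regions are being closed. Below is a small hedged sketch of that decision in isolation, with invented class and method names, so the three shutdown flavors are easy to compare.

import org.apache.geode.cache.Operation;

public final class CacheCloseOperationSketch {

  // Mirrors the selection made inside GemFireCacheImpl.close(): a forced disconnect wins,
  // then a reconnect in progress, otherwise a plain cache close.
  static Operation chooseCloseOperation(boolean forcedDisconnect, boolean reconnecting) {
    if (forcedDisconnect) {
      return Operation.FORCED_DISCONNECT;
    } else if (reconnecting) {
      return Operation.CACHE_RECONNECT;
    }
    return Operation.CACHE_CLOSE;
  }

  public static void main(String[] args) {
    // Operation constants are canonical instances, so reference comparison is sufficient.
    Operation op = chooseCloseOperation(false, true);
    if (op == Operation.FORCED_DISCONNECT) {
      System.out.println("regions closed because the member was forced out of the system");
    } else if (op == Operation.CACHE_RECONNECT) {
      System.out.println("regions closed as part of an automatic reconnect");
    } else {
      System.out.println("regions closed by an explicit Cache.close()");
    }
  }
}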
Use of org.apache.geode.cache.Operation in project geode by apache.
From the class FilterProfile, the method getInterestedClients:
/**
 * get the clients interested in the given event that are attached to this server.
 *
 * @param event the entry event being applied to the cache
 * @param akc allKeyClients collection
 * @param koi keysOfInterest collection
 * @param pats patternsOfInterest collection
 * @param foi filtersOfInterest collection
 * @return a set of the clients interested in the event
 */
private Set getInterestedClients(EntryEvent event, Set akc, Map<Object, Set> koi, Map<Object, Map<Object, Pattern>> pats, Map<Object, Map> foi) {
  Set result = null;
  if (akc != null) {
    result = new HashSet(akc);
    if (logger.isDebugEnabled()) {
      logger.debug("these clients matched for all-keys: {}", akc);
    }
  }
  if (koi != null) {
    for (Iterator it = koi.entrySet().iterator(); it.hasNext(); ) {
      Map.Entry entry = (Map.Entry) it.next();
      Set keys = (Set) entry.getValue();
      if (keys.contains(event.getKey())) {
        Object clientID = entry.getKey();
        if (result == null)
          result = new HashSet();
        result.add(clientID);
        if (logger.isDebugEnabled()) {
          logger.debug("client {} matched for key list (size {})", clientID, koi.get(clientID).size());
        }
      }
    }
  }
  if (pats != null && (event.getKey() instanceof String)) {
    for (Iterator it = pats.entrySet().iterator(); it.hasNext(); ) {
      Map.Entry entry = (Map.Entry) it.next();
      String stringKey = (String) event.getKey();
      Map<Object, Pattern> interestList = (Map<Object, Pattern>) entry.getValue();
      for (Pattern keyPattern : interestList.values()) {
        if (keyPattern.matcher(stringKey).matches()) {
          Object clientID = entry.getKey();
          if (result == null)
            result = new HashSet();
          result.add(clientID);
          if (logger.isDebugEnabled()) {
            logger.debug("client {} matched for pattern ({})", clientID, pats.get(clientID));
          }
          break;
        }
      }
    }
  }
  if (foi != null && foi.size() > 0) {
    Object value;
    boolean serialized;
    {
      SerializedCacheValue<?> serValue = event.getSerializedNewValue();
      serialized = (serValue != null);
      if (!serialized) {
        value = event.getNewValue();
      } else {
        value = serValue.getSerializedValue();
      }
    }
    InterestEvent iev = new InterestEvent(event.getKey(), value, !serialized);
    Operation op = event.getOperation();
    for (Iterator it = foi.entrySet().iterator(); it.hasNext(); ) {
      Map.Entry entry = (Map.Entry) it.next();
      Map<String, InterestFilter> interestList = (Map<String, InterestFilter>) entry.getValue();
      for (InterestFilter filter : interestList.values()) {
        if ((op.isCreate() && filter.notifyOnCreate(iev)) || (op.isUpdate() && filter.notifyOnUpdate(iev)) || (op.isDestroy() && filter.notifyOnDestroy(iev)) || (op.isInvalidate() && filter.notifyOnInvalidate(iev))) {
          Object clientID = entry.getKey();
          if (result == null)
            result = new HashSet();
          result.add(clientID);
          if (logger.isDebugEnabled()) {
            logger.debug("client {} matched for filter ({})", clientID, getFiltersOfInterest().get(clientID));
          }
          break;
        }
      }
    }
  }
  return result;
}
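The all-keys, key-list, and pattern collections consulted above are populated by client-side interest registration. A minimal client sketch follows; it is not FilterProfile code, and the locator address, region name, and keys are illustrative assumptions.

import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;

public final class InterestRegistrationSketch {

  public static void main(String[] args) {
    // Subscription must be enabled for the server to deliver events for registered interest.
    ClientCache cache = new ClientCacheFactory()
        .addPoolLocator("localhost", 10334)
        .setPoolSubscriptionEnabled(true)
        .create();
    Region<String, Object> region = cache
        .<String, Object>createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
        .create("portfolios");

    region.registerInterest("ALL_KEYS");    // feeds the all-keys set checked first above
    region.registerInterest("key-1");       // feeds the keys-of-interest map
    region.registerInterestRegex("key-.*"); // feeds the patterns-of-interest map

    cache.close();
  }
}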
Use of org.apache.geode.cache.Operation in project geode by apache.
From the class ProxyRegionMap, the method txApplyPut:
public void txApplyPut(Operation p_putOp, Object key, Object newValue, boolean didDestroy, TransactionId txId, TXRmtEvent txEvent, EventID eventId, Object aCallbackArgument, List<EntryEventImpl> pendingCallbacks, FilterRoutingInfo filterRoutingInfo, ClientProxyMembershipID bridgeContext, TXEntryState txEntryState, VersionTag versionTag, long tailKey) {
  Operation putOp = p_putOp.getCorrespondingCreateOp();
  long lastMod = owner.cacheTimeMillis();
  this.owner.txApplyPutPart2(markerEntry, key, lastMod, true, didDestroy, false);
  if (this.owner.isInitialized()) {
    if (txEvent != null) {
      txEvent.addPut(putOp, this.owner, markerEntry, key, newValue, aCallbackArgument);
    }
    if (AbstractRegionMap.shouldCreateCBEvent(this.owner, this.owner.isInitialized())) {
      // fix for bug 39526
      boolean cbEventInPending = false;
      @Released EntryEventImpl e = AbstractRegionMap.createCBEvent(this.owner, putOp, key, newValue, txId, txEvent, eventId, aCallbackArgument, filterRoutingInfo, bridgeContext, txEntryState, versionTag, tailKey);
      try {
        AbstractRegionMap.switchEventOwnerAndOriginRemote(e, txEntryState == null);
        if (pendingCallbacks == null) {
          this.owner.invokeTXCallbacks(EnumListenerEvent.AFTER_CREATE, e, true);
        } else {
          pendingCallbacks.add(e);
          cbEventInPending = true;
        }
      } finally {
        if (!cbEventInPending)
          e.release();
      }
    }
  }
}
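Here txApplyPut normalizes the incoming operation to its create variant before firing AFTER_CREATE callbacks. The standalone sketch below (not ProxyRegionMap code) prints that mapping for a few sample constants; the chosen inputs are an arbitrary illustration and the results are left to the output rather than asserted.

import org.apache.geode.cache.Operation;

public final class CorrespondingCreateOpSketch {

  public static void main(String[] args) {
    Operation[] samples = { Operation.CREATE, Operation.UPDATE, Operation.PUTALL_UPDATE };
    for (Operation op : samples) {
      // getCorrespondingCreateOp() is the call ProxyRegionMap.txApplyPut makes on p_putOp.
      System.out.println(op + " -> " + op.getCorrespondingCreateOp());
    }
  }
}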