
Example 51 with Released

Use of org.apache.geode.internal.offheap.annotations.Released in project geode by apache.

The class AbstractRegionMap, method txApplyDestroy:

public void txApplyDestroy(Object key, TransactionId txId, TXRmtEvent txEvent, boolean inTokenMode, boolean inRI, Operation op, EventID eventId, Object aCallbackArgument, List<EntryEventImpl> pendingCallbacks, FilterRoutingInfo filterRoutingInfo, ClientProxyMembershipID bridgeContext, boolean isOriginRemote, TXEntryState txEntryState, VersionTag versionTag, long tailKey) {
    final boolean isDebugEnabled = logger.isDebugEnabled();
    final LocalRegion owner = _getOwner();
    final boolean isRegionReady = !inTokenMode;
    final boolean hasRemoteOrigin = !((TXId) txId).getMemberId().equals(owner.getMyId());
    boolean cbEventInPending = false;
    IndexManager oqlIndexManager = owner.getIndexManager();
    try {
        RegionEntry re = getEntry(key);
        if (re != null) {
            // Wait for any in-progress index initialization before modifying this entry;
            // a region clear running in parallel could otherwise leave the OQL indexes
            // inconsistent, since it also does index INIT.
            if (oqlIndexManager != null) {
                oqlIndexManager.waitForIndexInit();
            }
            try {
                synchronized (re) {
                    if (!re.isRemoved() || re.isTombstone()) {
                        Object oldValue = re.getValueInVM(owner);
                        final int oldSize = owner.calculateRegionEntryValueSize(re);
                        // Create an entry event only if the calling context is
                        // a receipt of a TXCommitMessage AND there are callbacks installed
                        // for this region
                        boolean invokeCallbacks = shouldCreateCBEvent(owner, isRegionReady || inRI);
                        @Released EntryEventImpl cbEvent = createCBEvent(owner, op, key, null, txId, txEvent, eventId, aCallbackArgument, filterRoutingInfo, bridgeContext, txEntryState, versionTag, tailKey);
                        try {
                            if (owner.isUsedForPartitionedRegionBucket()) {
                                txHandleWANEvent(owner, cbEvent, txEntryState);
                            }
                            cbEvent.setRegionEntry(re);
                            cbEvent.setOldValue(oldValue);
                            if (isDebugEnabled) {
                                logger.debug("txApplyDestroy cbEvent={}", cbEvent);
                            }
                            txRemoveOldIndexEntry(Operation.DESTROY, re);
                            if (txEvent != null) {
                                txEvent.addDestroy(owner, re, re.getKey(), aCallbackArgument);
                            }
                            boolean clearOccured = false;
                            try {
                                processAndGenerateTXVersionTag(owner, cbEvent, re, txEntryState);
                                if (inTokenMode) {
                                    if (oldValue == Token.TOMBSTONE) {
                                        owner.unscheduleTombstone(re);
                                    }
                                    re.setValue(owner, Token.DESTROYED);
                                } else {
                                    if (!re.isTombstone()) {
                                        if (shouldPerformConcurrencyChecks(owner, cbEvent) && cbEvent.getVersionTag() != null) {
                                            re.makeTombstone(owner, cbEvent.getVersionTag());
                                        } else {
                                            // fix for bug 43063
                                            re.removePhase1(owner, false);
                                            re.removePhase2();
                                            removeEntry(key, re, false);
                                        }
                                    } else {
                                        owner.rescheduleTombstone(re, re.getVersionStamp().asVersionTag());
                                    }
                                }
                                EntryLogger.logTXDestroy(_getOwnerObject(), key);
                                owner.updateSizeOnRemove(key, oldSize);
                            } catch (RegionClearedException rce) {
                                clearOccured = true;
                            }
                            owner.txApplyDestroyPart2(re, re.getKey(), inTokenMode, clearOccured);
                            if (invokeCallbacks) {
                                switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
                                if (pendingCallbacks == null) {
                                    owner.invokeTXCallbacks(EnumListenerEvent.AFTER_DESTROY, cbEvent, true);
                                } else {
                                    pendingCallbacks.add(cbEvent);
                                    cbEventInPending = true;
                                }
                            }
                            if (!clearOccured) {
                                lruEntryDestroy(re);
                            }
                            if (owner.concurrencyChecksEnabled && txEntryState != null && cbEvent != null) {
                                txEntryState.setVersionTag(cbEvent.getVersionTag());
                            }
                        } finally {
                            if (!cbEventInPending)
                                cbEvent.release();
                        }
                    }
                }
            } finally {
                if (oqlIndexManager != null) {
                    oqlIndexManager.countDownIndexUpdaters();
                }
            }
        } else if (inTokenMode || owner.concurrencyChecksEnabled) {
            // treating tokenMode and re == null as same, since we now want to
            // generate versions and Tombstones for destroys
            boolean dispatchListenerEvent = inTokenMode;
            boolean opCompleted = false;
            // TODO: if inTokenMode then Token.DESTROYED is ok but what about !inTokenMode because
            // owner.concurrencyChecksEnabled? In that case we do not want a DESTROYED token.
            RegionEntry newRe = getEntryFactory().createEntry(owner, key, Token.DESTROYED);
            if (oqlIndexManager != null) {
                oqlIndexManager.waitForIndexInit();
            }
            EntryEventImpl cbEvent = null;
            try {
                synchronized (newRe) {
                    RegionEntry oldRe = putEntryIfAbsent(key, newRe);
                    while (!opCompleted && oldRe != null) {
                        synchronized (oldRe) {
                            if (oldRe.isRemovedPhase2()) {
                                owner.getCachePerfStats().incRetries();
                                _getMap().remove(key, oldRe);
                                oldRe = putEntryIfAbsent(key, newRe);
                            } else {
                                try {
                                    boolean invokeCallbacks = shouldCreateCBEvent(owner, isRegionReady || inRI);
                                    cbEvent = createCBEvent(owner, op, key, null, txId, txEvent, eventId, aCallbackArgument, filterRoutingInfo, bridgeContext, txEntryState, versionTag, tailKey);
                                    try {
                                        cbEvent.setRegionEntry(oldRe);
                                        cbEvent.setOldValue(Token.NOT_AVAILABLE);
                                        if (isDebugEnabled) {
                                            logger.debug("txApplyDestroy token mode cbEvent={}", cbEvent);
                                        }
                                        if (owner.isUsedForPartitionedRegionBucket()) {
                                            txHandleWANEvent(owner, cbEvent, txEntryState);
                                        }
                                        processAndGenerateTXVersionTag(owner, cbEvent, oldRe, txEntryState);
                                        if (invokeCallbacks) {
                                            switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
                                            if (pendingCallbacks == null) {
                                                owner.invokeTXCallbacks(EnumListenerEvent.AFTER_DESTROY, cbEvent, dispatchListenerEvent);
                                            } else {
                                                pendingCallbacks.add(cbEvent);
                                                cbEventInPending = true;
                                            }
                                        }
                                        int oldSize = 0;
                                        boolean wasTombstone = oldRe.isTombstone();
                                        if (!wasTombstone) {
                                            oldSize = owner.calculateRegionEntryValueSize(oldRe);
                                        }
                                        oldRe.setValue(owner, Token.DESTROYED);
                                        EntryLogger.logTXDestroy(_getOwnerObject(), key);
                                        if (wasTombstone) {
                                            owner.unscheduleTombstone(oldRe);
                                        }
                                        owner.updateSizeOnRemove(oldRe.getKey(), oldSize);
                                        owner.txApplyDestroyPart2(oldRe, oldRe.getKey(), inTokenMode, false);
                                        lruEntryDestroy(oldRe);
                                    } finally {
                                        if (!cbEventInPending)
                                            cbEvent.release();
                                    }
                                } catch (RegionClearedException rce) {
                                    owner.txApplyDestroyPart2(oldRe, oldRe.getKey(), inTokenMode, true);
                                }
                                if (shouldPerformConcurrencyChecks(owner, cbEvent) && cbEvent.getVersionTag() != null) {
                                    oldRe.makeTombstone(owner, cbEvent.getVersionTag());
                                } else if (!inTokenMode) {
                                    // only remove for NORMAL regions if they do not generate versions see 51781
                                    // fix for bug 43063
                                    oldRe.removePhase1(owner, false);
                                    oldRe.removePhase2();
                                    removeEntry(key, oldRe, false);
                                }
                                opCompleted = true;
                            }
                        }
                    }
                    if (!opCompleted) {
                        // already has value set to Token.DESTROYED
                        opCompleted = true;
                        boolean invokeCallbacks = shouldCreateCBEvent(owner, isRegionReady || inRI);
                        cbEvent = createCBEvent(owner, op, key, null, txId, txEvent, eventId, aCallbackArgument, filterRoutingInfo, bridgeContext, txEntryState, versionTag, tailKey);
                        try {
                            cbEvent.setRegionEntry(newRe);
                            cbEvent.setOldValue(Token.NOT_AVAILABLE);
                            if (isDebugEnabled) {
                                logger.debug("txApplyDestroy token mode cbEvent={}", cbEvent);
                            }
                            if (owner.isUsedForPartitionedRegionBucket()) {
                                txHandleWANEvent(owner, cbEvent, txEntryState);
                            }
                            processAndGenerateTXVersionTag(owner, cbEvent, newRe, txEntryState);
                            if (invokeCallbacks) {
                                switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
                                if (pendingCallbacks == null) {
                                    owner.invokeTXCallbacks(EnumListenerEvent.AFTER_DESTROY, cbEvent, dispatchListenerEvent);
                                } else {
                                    pendingCallbacks.add(cbEvent);
                                    cbEventInPending = true;
                                }
                            }
                            EntryLogger.logTXDestroy(_getOwnerObject(), key);
                            owner.updateSizeOnCreate(newRe.getKey(), 0);
                            if (shouldPerformConcurrencyChecks(owner, cbEvent) && cbEvent.getVersionTag() != null) {
                                newRe.makeTombstone(owner, cbEvent.getVersionTag());
                            } else if (!inTokenMode) {
                                // only remove for NORMAL regions if they do not generate versions see 51781
                                // fix for bug 43063
                                newRe.removePhase1(owner, false);
                                newRe.removePhase2();
                                removeEntry(key, newRe, false);
                            }
                            owner.txApplyDestroyPart2(newRe, newRe.getKey(), inTokenMode, false);
                            // Note no need for LRU work since the entry is destroyed
                            // and will be removed when gii completes
                        } finally {
                            if (!cbEventInPending)
                                cbEvent.release();
                        }
                    }
                    if (owner.concurrencyChecksEnabled && txEntryState != null && cbEvent != null) {
                        txEntryState.setVersionTag(cbEvent.getVersionTag());
                    }
                }
            } catch (RegionClearedException e) {
                // TODO
            } finally {
                if (oqlIndexManager != null) {
                    oqlIndexManager.countDownIndexUpdaters();
                }
            }
        } else if (re == null) {
            // Fix bug#43594
            // In cases where bucket region is re-created, it may so happen that
            // the destroy is already applied on the Initial image provider, thus
            // causing region entry to be absent.
            // Notify clients with client events.
            @Released EntryEventImpl cbEvent = createCBEvent(owner, op, key, null, txId, txEvent, eventId, aCallbackArgument, filterRoutingInfo, bridgeContext, txEntryState, versionTag, tailKey);
            try {
                if (owner.isUsedForPartitionedRegionBucket()) {
                    txHandleWANEvent(owner, cbEvent, txEntryState);
                }
                switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
                if (pendingCallbacks == null) {
                    owner.invokeTXCallbacks(EnumListenerEvent.AFTER_DESTROY, cbEvent, false);
                } else {
                    pendingCallbacks.add(cbEvent);
                    cbEventInPending = true;
                }
            } finally {
                if (!cbEventInPending)
                    cbEvent.release();
            }
        }
    } catch (DiskAccessException dae) {
        owner.handleDiskAccessException(dae);
        throw dae;
    }
}
Also used : IndexManager(org.apache.geode.cache.query.internal.index.IndexManager) Released(org.apache.geode.internal.offheap.annotations.Released) DiskAccessException(org.apache.geode.cache.DiskAccessException) StoredObject(org.apache.geode.internal.offheap.StoredObject)
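
The detail worth calling out in the example above is the ownership rule around the @Released cbEvent: it is created with createCBEvent and is either released in the finally block or handed off to pendingCallbacks, in which case cbEventInPending suppresses the release and the caller becomes responsible for it. The event must be released exactly once, on whichever side ends up owning it. Below is a minimal, self-contained sketch of that conditional hand-off; ReleasableEvent and the other names are hypothetical stand-ins, not Geode APIs.

import java.util.ArrayList;
import java.util.List;

// Hypothetical stand-in for an off-heap backed event that must be released exactly once.
class ReleasableEvent {
    void release() { /* drop retained off-heap references in the real event */ }
}

public class ConditionalReleaseSketch {
    // Mirrors the txApplyDestroy shape: release in finally unless ownership was
    // handed off to the pending-callbacks list, in which case the caller releases.
    static void applyDestroy(List<ReleasableEvent> pendingCallbacks) {
        boolean eventInPending = false;
        ReleasableEvent event = new ReleasableEvent(); // stands in for createCBEvent(...)
        try {
            // ... apply the destroy and invoke callbacks here ...
            if (pendingCallbacks != null) {
                pendingCallbacks.add(event); // ownership transferred
                eventInPending = true;
            }
        } finally {
            if (!eventInPending) {
                event.release(); // no hand-off, so this method must release
            }
        }
    }

    public static void main(String[] args) {
        List<ReleasableEvent> pending = new ArrayList<>();
        applyDestroy(pending); // deferred: event sits in the list, released later by its new owner
        applyDestroy(null);    // immediate: event released in the finally block
        System.out.println("events deferred to pending list: " + pending.size());
    }
}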

Example 52 with Released

Use of org.apache.geode.internal.offheap.annotations.Released in project geode by apache.

The class AbstractRegionMap, method copyRecoveredEntries:

public void copyRecoveredEntries(RegionMap rm) {
    // We need to sort the tombstones before scheduling them,
    // so that they will be in the correct order.
    OrderedTombstoneMap<RegionEntry> tombstones = new OrderedTombstoneMap<RegionEntry>();
    if (rm != null) {
        CustomEntryConcurrentHashMap<Object, Object> other = ((AbstractRegionMap) rm)._getMap();
        Iterator<Map.Entry<Object, Object>> it = other.entrySetWithReusableEntries().iterator();
        while (it.hasNext()) {
            Map.Entry<Object, Object> me = it.next();
            // This removes the RegionEntry from "rm" but it does not decrement its
            // refcount to an offheap value.
            it.remove();
            RegionEntry oldRe = (RegionEntry) me.getValue();
            Object key = me.getKey();
            @Retained @Released Object value = oldRe._getValueRetain((RegionEntryContext) ((AbstractRegionMap) rm)._getOwnerObject(), true);
            try {
                if (value == Token.NOT_AVAILABLE) {
                    // fix for bug 43993
                    value = null;
                }
                if (value == Token.TOMBSTONE && !_getOwner().getConcurrencyChecksEnabled()) {
                    continue;
                }
                RegionEntry newRe = getEntryFactory().createEntry((RegionEntryContext) _getOwnerObject(), key, value);
                // TODO: passing value to createEntry causes a problem with the disk stats.
                // The disk stats have already been set to track oldRe.
                // So when we call createEntry we probably want to give it REMOVED_PHASE1
                // and then set the value in copyRecoveredEntry in a way that does not
                // change the disk stats. This also depends on DiskEntry.Helper.initialize not changing
                // the stats for REMOVED_PHASE1
                copyRecoveredEntry(oldRe, newRe);
                // newRe is now in this._getMap().
                if (newRe.isTombstone()) {
                    VersionTag tag = newRe.getVersionStamp().asVersionTag();
                    tombstones.put(tag, newRe);
                } else {
                    _getOwner().updateSizeOnCreate(key, _getOwner().calculateRegionEntryValueSize(newRe));
                }
                incEntryCount(1);
                lruEntryUpdate(newRe);
            } finally {
                OffHeapHelper.release(value);
                if (oldRe instanceof OffHeapRegionEntry) {
                    ((OffHeapRegionEntry) oldRe).release();
                }
            }
            lruUpdateCallback();
        }
    } else {
        for (Iterator<RegionEntry> iter = regionEntries().iterator(); iter.hasNext(); ) {
            RegionEntry re = iter.next();
            if (re.isTombstone()) {
                if (re.getVersionStamp() == null) {
                    // bug #50992 - recovery from versioned to
                    // non-versioned
                    iter.remove();
                    continue;
                } else {
                    tombstones.put(re.getVersionStamp().asVersionTag(), re);
                }
            } else {
                _getOwner().updateSizeOnCreate(re.getKey(), _getOwner().calculateRegionEntryValueSize(re));
            }
        }
        incEntryCount(size());
        // Since lru was not being done during recovery call it now.
        lruUpdateCallback();
    }
    // Schedule all of the tombstones, now that we have sorted them
    Map.Entry<VersionTag, RegionEntry> entry;
    while ((entry = tombstones.take()) != null) {
        // refresh the tombstone so it doesn't time out too soon
        _getOwner().scheduleTombstone(entry.getValue(), entry.getKey());
    }
}
Also used : Released(org.apache.geode.internal.offheap.annotations.Released) LRUEntry(org.apache.geode.internal.cache.lru.LRUEntry) Retained(org.apache.geode.internal.offheap.annotations.Retained) VersionTag(org.apache.geode.internal.cache.versions.VersionTag) StoredObject(org.apache.geode.internal.offheap.StoredObject) Map(java.util.Map) CustomEntryConcurrentHashMap(org.apache.geode.internal.util.concurrent.CustomEntryConcurrentHashMap)
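
Here the interesting annotation is the combined @Retained @Released on value: _getValueRetain hands back a reference whose count has already been incremented for this method, and OffHeapHelper.release in the finally block is the matching decrement (with a separate release for the old off-heap region entry). Below is a small reference-counting toy that models the same contract; the RefCounted class and its methods are hypothetical, not Geode code.

// Toy reference-counted value modeling the @Retained/@Released contract;
// RefCounted is hypothetical, not a Geode type.
class RefCounted {
    private int refCount = 1;

    synchronized RefCounted retain() { // @Retained: the caller now owns one extra reference
        refCount++;
        return this;
    }

    synchronized void release() { // @Released: the owner gives its reference back
        if (--refCount == 0) {
            // the backing (off-heap) memory would be freed here
        }
    }

    synchronized int count() {
        return refCount;
    }
}

public class RetainReleaseSketch {
    // Mirrors copyRecoveredEntries: retain the old value, use it, always release in finally.
    static void copyEntry(RefCounted oldValue) {
        RefCounted value = oldValue.retain();
        try {
            // ... build the new entry from value ...
        } finally {
            value.release();
        }
    }

    public static void main(String[] args) {
        RefCounted v = new RefCounted();
        copyEntry(v);
        System.out.println("refCount after copy: " + v.count()); // back to 1
    }
}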

Example 53 with Released

Use of org.apache.geode.internal.offheap.annotations.Released in project geode by apache.

The class DummyQRegion, method asList:

@Override
public List asList() {
    if (valueInList == null) {
        valueInList = new ArrayList(1);
    }
    valueInList.clear();
    Object val = this.entry.getValueOffHeapOrDiskWithoutFaultIn((LocalRegion) getRegion());
    if (val instanceof StoredObject) {
        @Retained @Released StoredObject ohval = (StoredObject) val;
        try {
            val = ohval.getDeserializedValue(getRegion(), this.entry);
        } finally {
            ohval.release();
        }
    } else if (val instanceof CachedDeserializable) {
        val = ((CachedDeserializable) val).getDeserializedValue(getRegion(), this.entry);
    }
    valueInList.add(val);
    return valueInList;
}
Also used : Released(org.apache.geode.internal.offheap.annotations.Released) StoredObject(org.apache.geode.internal.offheap.StoredObject) Retained(org.apache.geode.internal.offheap.annotations.Retained) CachedDeserializable(org.apache.geode.internal.cache.CachedDeserializable) ArrayList(java.util.ArrayList)
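
asList shows the read-side shape of the same contract: if the raw entry value turns out to be off-heap (a StoredObject), it is deserialized inside a try and released in a finally, while on-heap CachedDeserializable values need no release. A self-contained sketch of that branch-then-release idiom follows, with a hypothetical OffHeapValue interface standing in for StoredObject.

// Hypothetical stand-in for Geode's StoredObject: an off-heap value that must be
// released after use. Not a Geode API.
interface OffHeapValue {
    Object deserialize();
    void release();
}

public class ReadReleaseSketch {
    // Deserialize a raw entry value, releasing it only when it is off-heap;
    // on-heap values are returned as-is, mirroring the asList branches above.
    static Object deserializedValue(Object raw) {
        if (raw instanceof OffHeapValue) {
            OffHeapValue ohval = (OffHeapValue) raw;
            try {
                return ohval.deserialize();
            } finally {
                ohval.release(); // drop the retained off-heap reference
            }
        }
        return raw;
    }

    public static void main(String[] args) {
        OffHeapValue stored = new OffHeapValue() {
            public Object deserialize() { return "deserialized off-heap value"; }
            public void release() { System.out.println("off-heap value released"); }
        };
        System.out.println(deserializedValue(stored));
        System.out.println(deserializedValue("plain on-heap value"));
    }
}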

Example 54 with Released

Use of org.apache.geode.internal.offheap.annotations.Released in project geode by apache.

The class PartitionedRegion, method sendMsgByBucket:

/*
   * If it fails after all retries, this throws PartitionedRegionStorageException; no return
   * value is needed for the failure case.
   */
private VersionedObjectList sendMsgByBucket(final Integer bucketId, RemoveAllPRMessage prMsg) {
    // retry the removeAll remotely until it finds the right node managing the bucket
    @Released EntryEventImpl event = prMsg.getFirstEvent(this);
    try {
        RetryTimeKeeper retryTime = null;
        InternalDistributedMember currentTarget = getNodeForBucketWrite(bucketId, null);
        if (logger.isDebugEnabled()) {
            logger.debug("PR.sendMsgByBucket:bucket {}'s currentTarget is {}", bucketId, currentTarget);
        }
        long timeOut = 0;
        int count = 0;
        for (; ; ) {
            switch(count) {
                case 0:
                    // First time. Assume success, keep going.
                    break;
                case 1:
                    this.cache.getCancelCriterion().checkCancelInProgress(null);
                    // Second time (first failure). Calculate timeout and keep going.
                    timeOut = System.currentTimeMillis() + this.retryTimeout;
                    break;
                default:
                    this.cache.getCancelCriterion().checkCancelInProgress(null);
                    // test for timeout
                    long timeLeft = timeOut - System.currentTimeMillis();
                    if (timeLeft < 0) {
                        PRHARedundancyProvider.timedOut(this, null, null, "update an entry", this.retryTimeout);
                    // NOTREACHED
                    }
                    // Didn't time out. Sleep a bit and then continue
                    boolean interrupted = Thread.interrupted();
                    try {
                        Thread.sleep(PartitionedRegionHelper.DEFAULT_WAIT_PER_RETRY_ITERATION);
                    } catch (InterruptedException ignore) {
                        interrupted = true;
                    } finally {
                        if (interrupted) {
                            Thread.currentThread().interrupt();
                        }
                    }
                    break;
            }
            // switch
            count++;
            if (currentTarget == null) {
                // pick target
                checkReadiness();
                if (retryTime == null) {
                    retryTime = new RetryTimeKeeper(this.retryTimeout);
                }
                currentTarget = waitForNodeOrCreateBucket(retryTime, event, bucketId);
                if (logger.isDebugEnabled()) {
                    logger.debug("PR.sendMsgByBucket: event size is {}, new currentTarget is {}", getEntrySize(event), currentTarget);
                }
                // It's possible this is a GemFire thread e.g. ServerConnection
                // which got to this point because of a distributed system shutdown or
                // region closure which uses interrupt to break any sleep() or wait() calls
                // e.g. waitForPrimary or waitForBucketRecovery in which case throw exception
                checkShutdown();
                continue;
            }
            try {
                return tryToSendOneRemoveAllMessage(prMsg, currentTarget);
            } catch (ForceReattemptException prce) {
                checkReadiness();
                InternalDistributedMember lastTarget = currentTarget;
                if (retryTime == null) {
                    retryTime = new RetryTimeKeeper(this.retryTimeout);
                }
                currentTarget = getNodeForBucketWrite(bucketId, retryTime);
                if (logger.isTraceEnabled()) {
                    logger.trace("PR.sendMsgByBucket: Old target was {}, Retrying {}", lastTarget, currentTarget);
                }
                if (lastTarget.equals(currentTarget)) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("PR.sendMsgByBucket: Retrying at the same node:{} due to {}", currentTarget, prce.getMessage());
                    }
                    if (retryTime.overMaximum()) {
                        PRHARedundancyProvider.timedOut(this, null, null, "update an entry", this.retryTimeout);
                    // NOTREACHED
                    }
                    retryTime.waitToRetryNode();
                }
                event.setPossibleDuplicate(true);
                if (prMsg != null) {
                    prMsg.setPossibleDuplicate(true);
                }
            } catch (PrimaryBucketException notPrimary) {
                if (logger.isDebugEnabled()) {
                    logger.debug("Bucket {} on Node {} not primary", notPrimary.getLocalizedMessage(), currentTarget);
                }
                getRegionAdvisor().notPrimary(bucketId, currentTarget);
                if (retryTime == null) {
                    retryTime = new RetryTimeKeeper(this.retryTimeout);
                }
                currentTarget = getNodeForBucketWrite(bucketId, retryTime);
            } catch (DataLocationException dle) {
                if (logger.isDebugEnabled()) {
                    logger.debug("DataLocationException processing putAll", dle);
                }
                throw new TransactionException(dle);
            }
            // It's possible this is a GemFire thread e.g. ServerConnection
            // which got to this point because of a distributed system shutdown or
            // region closure which uses interrupt to break any sleep() or wait()
            // calls
            // e.g. waitForPrimary or waitForBucketRecovery in which case throw
            // exception
            checkShutdown();
            // If we get here, the attempt failed...
            if (count == 1) {
                this.prStats.incRemoveAllMsgsRetried();
            }
            this.prStats.incRemoveAllRetries();
        }
    // for
    // NOTREACHED
    } finally {
        event.release();
    }
}
Also used : Released(org.apache.geode.internal.offheap.annotations.Released) TransactionException(org.apache.geode.cache.TransactionException) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember)
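
Besides the @Released handling (the event obtained from getFirstEvent is released in the outer finally no matter how the loop exits), most of sendMsgByBucket is a retry loop: the first failure starts a deadline clock, later attempts sleep briefly and re-check the deadline, and the operation gives up once it expires. A self-contained sketch of that loop shape follows; the constants, attempt(), and the class name are hypothetical, and the interrupt and shutdown checks of the real method are omitted.

import java.util.concurrent.ThreadLocalRandom;

public class RetryLoopSketch {
    static final long RETRY_TIMEOUT_MS = 5_000;
    static final long WAIT_PER_RETRY_MS = 100;

    // Keep retrying attempt() until it succeeds or until the deadline, started at the
    // first failure (count == 1), expires.
    static boolean retryUntilTimeout() throws InterruptedException {
        long deadline = 0;
        for (int count = 0; ; count++) {
            if (count == 1) {
                deadline = System.currentTimeMillis() + RETRY_TIMEOUT_MS; // first failure: start the clock
            } else if (count > 1) {
                if (System.currentTimeMillis() > deadline) {
                    return false; // out of time, give up
                }
                Thread.sleep(WAIT_PER_RETRY_MS); // brief pause before the next attempt
            }
            if (attempt()) {
                return true;
            }
        }
    }

    // Placeholder for the remote send; fails most of the time to exercise the loop.
    static boolean attempt() {
        return ThreadLocalRandom.current().nextInt(10) == 0;
    }

    public static void main(String[] args) throws InterruptedException {
        System.out.println("succeeded: " + retryUntilTimeout());
    }
}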

Example 55 with Released

Use of org.apache.geode.internal.offheap.annotations.Released in project geode by apache.

The class LocalRegion, method evictDestroy:

/**
   * @return true if the evict destroy was done; false if it was not needed
   */
boolean evictDestroy(LRUEntry entry) {
    checkReadiness();
    @Released final EntryEventImpl event = generateEvictDestroyEvent(entry.getKey());
    try {
        return mapDestroy(event,
            false, // cacheWrite
            true, // isEviction
            null); // expectedOldValue
    } catch (CacheWriterException error) {
        throw new Error(LocalizedStrings.LocalRegion_CACHE_WRITER_SHOULD_NOT_HAVE_BEEN_CALLED_FOR_EVICTDESTROY.toLocalizedString(), error);
    } catch (TimeoutException anotherError) {
        throw new Error(LocalizedStrings.LocalRegion_NO_DISTRIBUTED_LOCK_SHOULD_HAVE_BEEN_ATTEMPTED_FOR_EVICTDESTROY.toLocalizedString(), anotherError);
    } catch (EntryNotFoundException yetAnotherError) {
        throw new Error(LocalizedStrings.LocalRegion_ENTRYNOTFOUNDEXCEPTION_SHOULD_BE_MASKED_FOR_EVICTDESTROY.toLocalizedString(), yetAnotherError);
    } finally {
        event.release();
    }
}
Also used : Released(org.apache.geode.internal.offheap.annotations.Released) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) InternalGemFireError(org.apache.geode.InternalGemFireError) CacheWriterException(org.apache.geode.cache.CacheWriterException) TimeoutException(org.apache.geode.cache.TimeoutException)
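
evictDestroy is the simplest @Released shape in this set: generate the event, do the work in a try, release in a finally. Its catch blocks translate exceptions that should be impossible on the eviction path into Error, with the original attached as the cause, so a bug fails loudly instead of being swallowed. A compact, hypothetical illustration of that translation idiom (not Geode code) follows.

public class EvictDestroySketch {
    // Hypothetical stand-in for the @Released event; not a Geode type.
    static class ReleasableEvent {
        void release() { /* drop retained off-heap references in the real event */ }
    }

    // Mirrors the shape of evictDestroy: exceptions that cannot legitimately occur on
    // the eviction path are rethrown as Error with the cause attached, and the event
    // is always released in finally.
    static boolean evictDestroy(ReleasableEvent event) {
        try {
            // ... mapDestroy(event, false /* cacheWrite */, true /* isEviction */, null) ...
            return true;
        } catch (RuntimeException shouldHaveBeenMasked) {
            throw new Error("unexpected exception during evictDestroy", shouldHaveBeenMasked);
        } finally {
            event.release();
        }
    }

    public static void main(String[] args) {
        System.out.println(evictDestroy(new ReleasableEvent()));
    }
}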

Aggregations

Released (org.apache.geode.internal.offheap.annotations.Released) 57
StoredObject (org.apache.geode.internal.offheap.StoredObject) 29
CacheWriterException (org.apache.geode.cache.CacheWriterException) 13
EntryNotFoundException (org.apache.geode.cache.EntryNotFoundException) 13
InternalDistributedMember (org.apache.geode.distributed.internal.membership.InternalDistributedMember) 12
ConcurrentCacheModificationException (org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException) 9
GatewaySenderEventCallbackArgument (org.apache.geode.internal.cache.wan.GatewaySenderEventCallbackArgument) 9
Retained (org.apache.geode.internal.offheap.annotations.Retained) 9
EntryEventImpl (org.apache.geode.internal.cache.EntryEventImpl) 8
VersionedObjectList (org.apache.geode.internal.cache.tier.sockets.VersionedObjectList) 8
DiskAccessException (org.apache.geode.cache.DiskAccessException) 6
Operation (org.apache.geode.cache.Operation) 6
ReplyException (org.apache.geode.distributed.internal.ReplyException) 6
RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException) 5
IndexManager (org.apache.geode.cache.query.internal.index.IndexManager) 5
PartitionedRegionDataStore (org.apache.geode.internal.cache.PartitionedRegionDataStore) 5
TimeoutException (org.apache.geode.cache.TimeoutException) 4
EventID (org.apache.geode.internal.cache.EventID) 4
IOException (java.io.IOException) 3
ArrayList (java.util.ArrayList) 3