
Example 6 with HAEventWrapper

Use of org.apache.geode.internal.cache.tier.sockets.HAEventWrapper in project geode by apache.

In the class HARegionQueue, the method getBatchAndUpdateThreadContext.

/**
   * This method prepares the batch of events and updates the thread-context with corresponding
   * counters, so that when remove is called by this thread, these events are destroyed from the
   * queue. This method should always be invoked within the {@code rwLock}.
   * 
   * @param batchSize - number of events to be peeked
   * @return - list of events peeked
   */
private List getBatchAndUpdateThreadContext(int batchSize) {
    Iterator itr = this.idsAvailable.iterator();
    int currSize = this.idsAvailable.size();
    int limit = currSize >= batchSize ? batchSize : currSize;
    List batch = new ArrayList(limit);
    List peekedEventsThreadContext;
    if ((peekedEventsThreadContext = (List) HARegionQueue.peekedEventsContext.get()) == null) {
        peekedEventsThreadContext = new LinkedList();
    }
    for (int i = 0; i < limit; i++) {
        Long counter = (Long) itr.next();
        Object eventOrWrapper = this.region.get(counter);
        Object event;
        if (eventOrWrapper instanceof HAEventWrapper) {
            event = haContainer.get(eventOrWrapper);
            if (event == null) {
                event = ((HAEventWrapper) eventOrWrapper).getClientUpdateMessage();
            }
        } else {
            event = eventOrWrapper;
        }
        if (event != null) {
            batch.add(event);
        }
        peekedEventsThreadContext.add(counter);
    }
    HARegionQueue.peekedEventsContext.set(peekedEventsThreadContext);
    return batch;
}
Also used: Iterator (java.util.Iterator), ArrayList (java.util.ArrayList), AtomicLong (java.util.concurrent.atomic.AtomicLong), List (java.util.List), LinkedList (java.util.LinkedList), HAEventWrapper (org.apache.geode.internal.cache.tier.sockets.HAEventWrapper)
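
The contract documented above (peek records counters in a thread-local list; a later remove by the same thread destroys exactly those entries) can be illustrated with a stripped-down sketch. The PeekedContextSketch class below is hypothetical: a plain ConcurrentHashMap and a ConcurrentSkipListSet of counters stand in for the HA region, and only the ThreadLocal bookkeeping is shown, without conflation, locking, or expiry.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListSet;

public class PeekedContextSketch {
    private final ConcurrentSkipListSet<Long> idsAvailable = new ConcurrentSkipListSet<>();
    private final Map<Long, Object> backingRegion = new ConcurrentHashMap<>();
    // Per-thread record of the counters handed out by the last peek.
    private static final ThreadLocal<List<Long>> peekedEventsContext = new ThreadLocal<>();

    public void put(long counter, Object event) {
        backingRegion.put(counter, event);
        idsAvailable.add(counter);
    }

    /** Peeks up to batchSize events and remembers their counters for this thread. */
    public List<Object> peekBatch(int batchSize) {
        List<Object> batch = new ArrayList<>(batchSize);
        List<Long> peeked = peekedEventsContext.get();
        if (peeked == null) {
            peeked = new ArrayList<>();
        }
        Iterator<Long> itr = idsAvailable.iterator();
        for (int i = 0; i < batchSize && itr.hasNext(); i++) {
            Long counter = itr.next();
            Object event = backingRegion.get(counter);
            if (event != null) {
                batch.add(event);
            }
            peeked.add(counter);
        }
        peekedEventsContext.set(peeked);
        return batch;
    }

    /** Destroys exactly the counters recorded by this thread's last peek. */
    public void remove() {
        List<Long> peeked = peekedEventsContext.get();
        if (peeked == null) {
            return;
        }
        for (Long counter : peeked) {
            idsAvailable.remove(counter);
            backingRegion.remove(counter);
        }
        peekedEventsContext.remove();
    }
}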

Example 7 with HAEventWrapper

Use of org.apache.geode.internal.cache.tier.sockets.HAEventWrapper in project geode by apache.

In the class HARegionQueue, the method closeClientCq.

public void closeClientCq(ClientProxyMembershipID clientId, InternalCqQuery cqToClose) {
    acquireReadLock();
    try {
        // Get all available Ids for the HA Region Queue
        Object[] availableIds = this.availableIDsArray();
        int currSize = availableIds.length;
        Object event = null;
        for (int i = 0; i < currSize; i++) {
            Long counter = (Long) availableIds[i];
            event = this.region.get(counter);
            HAEventWrapper wrapper = null;
            if (event instanceof HAEventWrapper) {
                wrapper = (HAEventWrapper) event;
                event = this.haContainer.get(event);
            }
            // The event can be null only while the queue is being destroyed
            if (event == null) {
                Assert.assertTrue(this.destroyInProgress, "Got event null when queue was not being destroyed");
            }
            if (event instanceof ClientUpdateMessageImpl) {
                ClientUpdateMessageImpl updateEvent = (ClientUpdateMessageImpl) event;
                updateEvent.removeClientCq(clientId, cqToClose);
                // If no more interest and no more cqs remove from available ids and backing region
                if (!updateEvent.hasCqs(clientId) && !updateEvent.isClientInterested(clientId)) {
                    if (wrapper != null) {
                        try {
                            if (this.destroyFromAvailableIDsAndRegion(counter)) {
                                stats.incEventsRemoved();
                            }
                        } catch (InterruptedException ignore) {
                            Thread.currentThread().interrupt();
                        }
                    }
                }
            }
        }
    } finally {
        releaseReadLock();
    }
}
Also used: ClientUpdateMessageImpl (org.apache.geode.internal.cache.tier.sockets.ClientUpdateMessageImpl), AtomicLong (java.util.concurrent.atomic.AtomicLong), HAEventWrapper (org.apache.geode.internal.cache.tier.sockets.HAEventWrapper)
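
The removal rule in closeClientCq reduces to one predicate: a queued event can be dropped for a client once that client has neither CQs nor interest registered on it. The CqCleanupSketch class below is hypothetical (plain String ids and collections in place of ClientProxyMembershipID, InternalCqQuery and ClientUpdateMessageImpl) and models only that predicate, not the queue bookkeeping.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class CqCleanupSketch {
    // clientId -> names of CQs still attached to a single queued event
    private final Map<String, Set<String>> cqsByClient = new HashMap<>();
    // clients that registered plain interest in the event's key
    private final Set<String> interestedClients = new HashSet<>();

    public void addCq(String clientId, String cqName) {
        cqsByClient.computeIfAbsent(clientId, k -> new HashSet<>()).add(cqName);
    }

    public void addInterest(String clientId) {
        interestedClients.add(clientId);
    }

    /** Closes one CQ and reports whether the event is now removable for this client. */
    public boolean closeClientCq(String clientId, String cqName) {
        Set<String> cqs = cqsByClient.get(clientId);
        if (cqs != null) {
            cqs.remove(cqName);
            if (cqs.isEmpty()) {
                cqsByClient.remove(clientId);
            }
        }
        // Mirrors: !updateEvent.hasCqs(clientId) && !updateEvent.isClientInterested(clientId)
        return !cqsByClient.containsKey(clientId) && !interestedClients.contains(clientId);
    }
}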

Example 8 with HAEventWrapper

Use of org.apache.geode.internal.cache.tier.sockets.HAEventWrapper in project geode by apache.

In the class HARegionQueue, the method putEventInHARegion.

/**
   * If the event is an instance of HAEventWrapper, put it into the haContainer and then into the ha
   * region. Otherwise, simply put it into the ha region.
   * 
   * @since GemFire 5.7
   */
protected void putEventInHARegion(Conflatable event, Long position) {
    if (event instanceof HAEventWrapper) {
        HAEventWrapper haEventWrapper = (HAEventWrapper) event;
        if (this.isQueueInitialized()) {
            if (haEventWrapper.getIsRefFromHAContainer()) {
                putEntryConditionallyIntoHAContainer(haEventWrapper);
            } else {
                // This means that the haEventWrapper reference we have is not
                // authentic, i.e. it doesn't refer to the HAEventWrapper instance
                // in the haContainer, but to the one outside it.
                boolean entryFound;
                // synchronized (this.haContainer) {
                HAEventWrapper original = null;
                do {
                    ClientUpdateMessageImpl old = (ClientUpdateMessageImpl) ((HAContainerWrapper) this.haContainer).putIfAbsent(haEventWrapper, haEventWrapper.getClientUpdateMessage());
                    if (old != null) {
                        original = (HAEventWrapper) ((HAContainerWrapper) this.haContainer).getKey(haEventWrapper);
                        if (original == null) {
                            continue;
                        }
                        synchronized (original) {
                            // assert the entry is still present
                            if (((HAContainerWrapper) this.haContainer).getKey(original) != null) {
                                original.incAndGetReferenceCount();
                                addClientCQsAndInterestList(old, haEventWrapper, this.haContainer, this.regionName);
                                haEventWrapper = original;
                            } else {
                                original = null;
                            }
                        }
                    } else {
                        synchronized (haEventWrapper) {
                            haEventWrapper.incAndGetReferenceCount();
                            haEventWrapper.setHAContainer(this.haContainer);
                            if (!haEventWrapper.getPutInProgress()) {
                                // This means that this is a GII'ed event. Hence we must
                                // explicitly set 'clientUpdateMessage' to null.
                                haEventWrapper.setClientUpdateMessage(null);
                            }
                            haEventWrapper.setIsRefFromHAContainer(true);
                        }
                        break;
                    }
                } while (original == null);
            /*
           * entry = (Map.Entry)((HAContainerWrapper)this.haContainer) .getEntry(haEventWrapper); if
           * (entry == null) { entryFound = false;
           * putEntryConditionallyIntoHAContainer(haEventWrapper); } else { entryFound = true; // Do
           * not assign entry.getKey() to haEventWrapper right now.
           * ((HAEventWrapper)entry.getKey()).incAndGetReferenceCount(); } }//haContainer
           * synchronized ends if (entryFound) { addClientCQsAndInterestList(entry, haEventWrapper,
           * haContainer, regionName); haEventWrapper = (HAEventWrapper)entry.getKey(); } else { //
           * entry not found if (!haEventWrapper.getPutInProgress()) { // This means that this is a
           * GII'ed event. Hence we must // explicitly set 'clientUpdateMessage' to null.
           * haEventWrapper.setClientUpdateMessage(null); }
           * haEventWrapper.setIsRefFromHAContainer(true); }
           */
            }
        }
        // This has now been taken care of in AbstractRegionMap.initialImagePut()
        // else{
        // if(!haEventWrapper.getIsRefFromHAContainer()){
        // haEventWrapper =(HAEventWrapper)((HAContainerWrapper)haContainer).getKey(haEventWrapper);
        // }
        // }
        // Put the reference to the HAEventWrapper instance into the
        // HA queue.
        this.region.put(position, haEventWrapper);
    // logger.info(LocalizedStrings.DEBUG, "added message at position " + position);
    } else {
        // (event instanceof ClientMarkerMessageImpl OR ConflatableObject OR
        // ClientInstantiatorMessage)
        this.region.put(position, event);
    // logger.info(LocalizedStrings.DEBUG, "added non-msg at position " + position);
    }
}
Also used: ClientUpdateMessageImpl (org.apache.geode.internal.cache.tier.sockets.ClientUpdateMessageImpl), HAEventWrapper (org.apache.geode.internal.cache.tier.sockets.HAEventWrapper)
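
The do/while loop above is a putIfAbsent-and-refcount pattern: the loser of the putIfAbsent race must look up the winning key, lock it, and confirm it is still mapped before bumping its reference count, retrying otherwise. The RefCountedContainerSketch class below is a hypothetical, self-contained rendering of that pattern over a plain ConcurrentHashMap; it omits the CQ/interest merging and the GII'ed-event special case.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

public class RefCountedContainerSketch {

    static class Wrapper {
        final long eventId;
        final AtomicLong refCount = new AtomicLong();

        Wrapper(long eventId) {
            this.eventId = eventId;
        }

        @Override
        public boolean equals(Object o) {
            return o instanceof Wrapper && ((Wrapper) o).eventId == eventId;
        }

        @Override
        public int hashCode() {
            return Long.hashCode(eventId);
        }
    }

    // Each wrapper maps to itself so the canonical key instance can be recovered,
    // mimicking HAContainerWrapper.getKey().
    private final ConcurrentHashMap<Wrapper, Wrapper> container = new ConcurrentHashMap<>();

    /** Returns the canonical wrapper held by the container, with its refcount incremented. */
    public Wrapper putOrShare(Wrapper candidate) {
        while (true) {
            Wrapper existing = container.putIfAbsent(candidate, candidate);
            if (existing == null) {
                // We won the race; the candidate is now the canonical instance.
                candidate.refCount.incrementAndGet();
                return candidate;
            }
            synchronized (existing) {
                // Re-check under the lock: a concurrent removal (assumed to also
                // synchronize on the wrapper) may have evicted it after our lookup.
                if (container.get(existing) == existing) {
                    existing.refCount.incrementAndGet();
                    return existing;
                }
                // The entry vanished between putIfAbsent and the lock; retry.
            }
        }
    }
}

The identity check under the lock plays the role of the original's getKey(original) != null test; without it, a reference count could be bumped on a wrapper that has already been evicted and whose counts would never be honored.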

Example 9 with HAEventWrapper

Use of org.apache.geode.internal.cache.tier.sockets.HAEventWrapper in project geode by apache.

In the class RequestEventValue, the method cmdExecute.

public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException {
    Part eventIDPart = null, valuePart = null;
    EventID event = null;
    Object callbackArg = null;
    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
    StringBuffer errMessage = new StringBuffer();
    serverConnection.setAsTrue(REQUIRES_RESPONSE);
    // Retrieve the data from the message parts
    int parts = clientMessage.getNumberOfParts();
    eventIDPart = clientMessage.getPart(0);
    if (eventIDPart == null) {
        logger.warn(LocalizedMessage.create(LocalizedStrings.RequestEventValue_0_THE_EVENT_ID_FOR_THE_GET_EVENT_VALUE_REQUEST_IS_NULL, serverConnection.getName()));
        errMessage.append(" The event id for the get event value request is null.");
        writeErrorResponse(clientMessage, MessageType.REQUESTDATAERROR, errMessage.toString(), serverConnection);
        serverConnection.setAsTrue(RESPONDED);
    } else {
        try {
            event = (EventID) eventIDPart.getObject();
        } catch (Exception e) {
            writeException(clientMessage, e, false, serverConnection);
            serverConnection.setAsTrue(RESPONDED);
            return;
        }
        if (parts > 1) {
            valuePart = clientMessage.getPart(1);
            try {
                if (valuePart != null) {
                    callbackArg = valuePart.getObject();
                }
            } catch (Exception e) {
                writeException(clientMessage, e, false, serverConnection);
                serverConnection.setAsTrue(RESPONDED);
                return;
            }
        }
        if (logger.isTraceEnabled()) {
            logger.trace("{}: Received get event value request ({} bytes) from {}", serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString());
        }
        CacheClientNotifier ccn = serverConnection.getAcceptor().getCacheClientNotifier();
        // Get the ha container.
        HAContainerWrapper haContainer = (HAContainerWrapper) ccn.getHaContainer();
        if (haContainer == null) {
            String reason = " was not found during get event value request";
            writeRegionDestroyedEx(clientMessage, "ha container", reason, serverConnection);
            serverConnection.setAsTrue(RESPONDED);
        } else {
            Object[] valueAndIsObject = new Object[2];
            try {
                Object data = haContainer.get(new HAEventWrapper(event));
                if (data == null) {
                    logger.warn(LocalizedMessage.create(LocalizedStrings.RequestEventValue_UNABLE_TO_FIND_A_CLIENT_UPDATE_MESSAGE_FOR_0, event));
                    String msgStr = "No value found for " + event + " in " + haContainer.getName();
                    writeErrorResponse(clientMessage, MessageType.REQUEST_EVENT_VALUE_ERROR, msgStr, serverConnection);
                    serverConnection.setAsTrue(RESPONDED);
                    return;
                } else {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Value retrieved for event {}", event);
                    }
                    Object val = ((ClientUpdateMessageImpl) data).getValueToConflate();
                    if (!(val instanceof byte[])) {
                        if (val instanceof CachedDeserializable) {
                            val = ((CachedDeserializable) val).getSerializedValue();
                        } else {
                            val = CacheServerHelper.serialize(val);
                        }
                        ((ClientUpdateMessageImpl) data).setLatestValue(val);
                    }
                    valueAndIsObject[0] = val;
                    valueAndIsObject[1] = Boolean.valueOf(((ClientUpdateMessageImpl) data).valueIsObject());
                }
            } catch (Exception e) {
                writeException(clientMessage, e, false, serverConnection);
                serverConnection.setAsTrue(RESPONDED);
                return;
            }
            Object data = valueAndIsObject[0];
            boolean isObject = (Boolean) valueAndIsObject[1];
            writeResponse(data, callbackArg, clientMessage, isObject, serverConnection);
            serverConnection.setAsTrue(RESPONDED);
            ccn.getClientProxy(serverConnection.getProxyID()).getStatistics().incDeltaFullMessagesSent();
            if (logger.isDebugEnabled()) {
                logger.debug("{}: Wrote get event value response back to {} for ha container {}", serverConnection.getName(), serverConnection.getSocketString(), haContainer.getName());
            }
        }
    }
}
Also used: ClientUpdateMessageImpl (org.apache.geode.internal.cache.tier.sockets.ClientUpdateMessageImpl), HAContainerWrapper (org.apache.geode.internal.cache.ha.HAContainerWrapper), CachedDeserializable (org.apache.geode.internal.cache.CachedDeserializable), CacheClientNotifier (org.apache.geode.internal.cache.tier.sockets.CacheClientNotifier), IOException (java.io.IOException), CachedRegionHelper (org.apache.geode.internal.cache.tier.CachedRegionHelper), Part (org.apache.geode.internal.cache.tier.sockets.Part), EventID (org.apache.geode.internal.cache.EventID), HAEventWrapper (org.apache.geode.internal.cache.tier.sockets.HAEventWrapper)
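
Before the response is written, the conflated value is normalized to a byte array. The sketch below is a hypothetical simplification: plain Java serialization stands in for CachedDeserializable.getSerializedValue() and CacheServerHelper.serialize(), and the isObject flag is derived from the input type, whereas the real flag comes from ClientUpdateMessageImpl.valueIsObject().

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;

public final class ValueNormalizationSketch {

    static final class NormalizedValue {
        final byte[] bytes;
        final boolean isObject; // true if bytes hold a serialized object rather than raw bytes

        NormalizedValue(byte[] bytes, boolean isObject) {
            this.bytes = bytes;
            this.isObject = isObject;
        }
    }

    /** Ensures the outgoing payload is always a byte array plus an isObject flag. */
    static NormalizedValue normalize(Object val) throws IOException {
        if (val instanceof byte[]) {
            return new NormalizedValue((byte[]) val, false);
        }
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
            oos.writeObject(val);
        }
        return new NormalizedValue(bos.toByteArray(), true);
    }
}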

Example 10 with HAEventWrapper

Use of org.apache.geode.internal.cache.tier.sockets.HAEventWrapper in project geode by apache.

In the class AbstractRegionMap, the method initialImagePut.

public boolean initialImagePut(final Object key, final long lastModified, Object newValue, final boolean wasRecovered, boolean deferLRUCallback, VersionTag entryVersion, InternalDistributedMember sender, boolean isSynchronizing) {
    boolean result = false;
    boolean done = false;
    boolean cleared = false;
    final LocalRegion owner = _getOwner();
    if (newValue == Token.TOMBSTONE && !owner.getConcurrencyChecksEnabled()) {
        return false;
    }
    if (owner instanceof HARegion && newValue instanceof CachedDeserializable) {
        Object actualVal = ((CachedDeserializable) newValue).getDeserializedValue(null, null);
        if (actualVal instanceof HAEventWrapper) {
            HAEventWrapper haEventWrapper = (HAEventWrapper) actualVal;
            // Key was removed at sender side so not putting it into the HARegion
            if (haEventWrapper.getClientUpdateMessage() == null) {
                return false;
            }
            // Getting the instance from singleton CCN. This assumes only one bridge
            // server in the VM
            HAContainerWrapper haContainer = (HAContainerWrapper) CacheClientNotifier.getInstance().getHaContainer();
            if (haContainer == null) {
                return false;
            }
            HAEventWrapper original = null;
            // synchronized (haContainer) {
            do {
                ClientUpdateMessageImpl oldMsg = (ClientUpdateMessageImpl) haContainer.putIfAbsent(haEventWrapper, haEventWrapper.getClientUpdateMessage());
                if (oldMsg != null) {
                    original = (HAEventWrapper) haContainer.getKey(haEventWrapper);
                    if (original == null) {
                        continue;
                    }
                    synchronized (original) {
                        if ((HAEventWrapper) haContainer.getKey(original) != null) {
                            original.incAndGetReferenceCount();
                            HARegionQueue.addClientCQsAndInterestList(oldMsg, haEventWrapper, haContainer, owner.getName());
                            haEventWrapper.setClientUpdateMessage(null);
                            newValue = CachedDeserializableFactory.create(original, ((CachedDeserializable) newValue).getSizeInBytes());
                        } else {
                            original = null;
                        }
                    }
                } else {
                    // putIfAbsent successful
                    synchronized (haEventWrapper) {
                        haEventWrapper.incAndGetReferenceCount();
                        haEventWrapper.setHAContainer(haContainer);
                        haEventWrapper.setClientUpdateMessage(null);
                        haEventWrapper.setIsRefFromHAContainer(true);
                    }
                    break;
                }
            // try until we either get a reference to HAEventWrapper from
            // HAContainer or successfully put one into it.
            } while (original == null);
        /*
         * entry = (Map.Entry)haContainer.getEntry(haEventWrapper); if (entry != null) { original =
         * (HAEventWrapper)entry.getKey(); original.incAndGetReferenceCount(); } else {
         * haEventWrapper.incAndGetReferenceCount(); haEventWrapper.setHAContainer(haContainer);
         * haContainer.put(haEventWrapper, haEventWrapper .getClientUpdateMessage());
         * haEventWrapper.setClientUpdateMessage(null);
         * haEventWrapper.setIsRefFromHAContainer(true); } } if (entry != null) {
         * HARegionQueue.addClientCQsAndInterestList(entry, haEventWrapper, haContainer,
         * owner.getName()); haEventWrapper.setClientUpdateMessage(null); newValue =
         * CachedDeserializableFactory.create(original,
         * ((CachedDeserializable)newValue).getSizeInBytes()); }
         */
        }
    }
    try {
        RegionEntry newRe = getEntryFactory().createEntry(owner, key, Token.REMOVED_PHASE1);
        EntryEventImpl event = null;
        @Retained @Released Object oldValue = null;
        try {
            RegionEntry oldRe = null;
            synchronized (newRe) {
                try {
                    oldRe = putEntryIfAbsent(key, newRe);
                    while (!done && oldRe != null) {
                        synchronized (oldRe) {
                            if (oldRe.isRemovedPhase2()) {
                                owner.getCachePerfStats().incRetries();
                                _getMap().remove(key, oldRe);
                                oldRe = putEntryIfAbsent(key, newRe);
                            } else {
                                boolean acceptedVersionTag = false;
                                if (entryVersion != null && owner.concurrencyChecksEnabled) {
                                    Assert.assertTrue(entryVersion.getMemberID() != null, "GII entry versions must have identifiers");
                                    try {
                                        boolean isTombstone = (newValue == Token.TOMBSTONE);
                                        // don't reschedule the tombstone if it hasn't changed
                                        boolean isSameTombstone = oldRe.isTombstone() && isTombstone && oldRe.getVersionStamp().asVersionTag().equals(entryVersion);
                                        if (isSameTombstone) {
                                            return true;
                                        }
                                        processVersionTagForGII(oldRe, owner, entryVersion, isTombstone, sender, !wasRecovered || isSynchronizing);
                                        acceptedVersionTag = true;
                                    } catch (ConcurrentCacheModificationException e) {
                                        return false;
                                    }
                                }
                                final boolean oldIsTombstone = oldRe.isTombstone();
                                final int oldSize = owner.calculateRegionEntryValueSize(oldRe);
                                try {
                                    result = oldRe.initialImagePut(owner, lastModified, newValue, wasRecovered, acceptedVersionTag);
                                    if (result) {
                                        if (oldIsTombstone) {
                                            owner.unscheduleTombstone(oldRe);
                                            if (newValue != Token.TOMBSTONE) {
                                                lruEntryCreate(oldRe);
                                            } else {
                                                lruEntryUpdate(oldRe);
                                            }
                                        }
                                        if (newValue == Token.TOMBSTONE) {
                                            owner.updateSizeOnRemove(key, oldSize);
                                            if (owner.getServerProxy() == null && owner.getVersionVector().isTombstoneTooOld(entryVersion.getMemberID(), entryVersion.getRegionVersion())) {
                                                // the received tombstone has already been reaped, so don't retain it
                                                removeTombstone(oldRe, entryVersion, false, false);
                                                return false;
                                            } else {
                                                owner.scheduleTombstone(oldRe, entryVersion);
                                                lruEntryDestroy(oldRe);
                                            }
                                        } else {
                                            int newSize = owner.calculateRegionEntryValueSize(oldRe);
                                            if (!oldIsTombstone) {
                                                owner.updateSizeOnPut(key, oldSize, newSize);
                                            } else {
                                                owner.updateSizeOnCreate(key, newSize);
                                            }
                                            EntryLogger.logInitialImagePut(_getOwnerObject(), key, newValue);
                                        }
                                    }
                                    if (owner.getIndexManager() != null) {
                                        // as the update could not locate the old key
                                        if (!oldRe.isRemoved()) {
                                            owner.getIndexManager().updateIndexes(oldRe, IndexManager.REMOVE_ENTRY, IndexProtocol.BEFORE_UPDATE_OP);
                                        }
                                        owner.getIndexManager().updateIndexes(oldRe, oldRe.isRemoved() ? IndexManager.ADD_ENTRY : IndexManager.UPDATE_ENTRY, oldRe.isRemoved() ? IndexProtocol.OTHER_OP : IndexProtocol.AFTER_UPDATE_OP);
                                    }
                                    done = true;
                                } finally {
                                    if (event != null) {
                                        event.release();
                                        event = null;
                                    }
                                }
                            }
                        }
                    }
                    if (!done) {
                        boolean versionTagAccepted = false;
                        if (entryVersion != null && owner.concurrencyChecksEnabled) {
                            Assert.assertTrue(entryVersion.getMemberID() != null, "GII entry versions must have identifiers");
                            try {
                                boolean isTombstone = (newValue == Token.TOMBSTONE);
                                processVersionTagForGII(newRe, owner, entryVersion, isTombstone, sender, !wasRecovered || isSynchronizing);
                                versionTagAccepted = true;
                            } catch (ConcurrentCacheModificationException e) {
                                return false;
                            }
                        }
                        result = newRe.initialImageInit(owner, lastModified, newValue, true, wasRecovered, versionTagAccepted);
                        try {
                            if (result) {
                                if (newValue == Token.TOMBSTONE) {
                                    owner.scheduleTombstone(newRe, entryVersion);
                                } else {
                                    owner.updateSizeOnCreate(key, owner.calculateRegionEntryValueSize(newRe));
                                    EntryLogger.logInitialImagePut(_getOwnerObject(), key, newValue);
                                    lruEntryCreate(newRe);
                                }
                                incEntryCount(1);
                            }
                            // Update local indexes
                            if (owner.getIndexManager() != null) {
                                // the update could not locate the old key
                                if (oldRe != null && !oldRe.isRemoved()) {
                                    owner.getIndexManager().updateIndexes(oldRe, IndexManager.REMOVE_ENTRY, IndexProtocol.BEFORE_UPDATE_OP);
                                }
                                owner.getIndexManager().updateIndexes(newRe, newRe.isRemoved() ? IndexManager.REMOVE_ENTRY : IndexManager.UPDATE_ENTRY, newRe.isRemoved() ? IndexProtocol.OTHER_OP : IndexProtocol.AFTER_UPDATE_OP);
                            }
                            done = true;
                        } finally {
                            if (event != null) {
                                event.release();
                                event = null;
                            }
                        }
                    }
                } finally {
                    if (done && result) {
                        initialImagePutEntry(newRe);
                    }
                    if (!done) {
                        removeEntry(key, newRe, false);
                        if (owner.getIndexManager() != null) {
                            owner.getIndexManager().updateIndexes(newRe, IndexManager.REMOVE_ENTRY, IndexProtocol.OTHER_OP);
                        }
                    }
                }
            }
        // synchronized
        } finally {
            if (event != null)
                event.release();
            OffHeapHelper.release(oldValue);
        }
    } catch (RegionClearedException rce) {
        // Asif: do not issue any sort of callbacks
        done = false;
        cleared = true;
    } catch (QueryException qe) {
        done = false;
        cleared = true;
    } finally {
        if (done && !deferLRUCallback) {
            lruUpdateCallback();
        } else if (!cleared) {
            resetThreadLocals();
        }
    }
    return result;
}
Also used: ClientUpdateMessageImpl (org.apache.geode.internal.cache.tier.sockets.ClientUpdateMessageImpl), HAContainerWrapper (org.apache.geode.internal.cache.ha.HAContainerWrapper), Released (org.apache.geode.internal.offheap.annotations.Released), ConcurrentCacheModificationException (org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException), QueryException (org.apache.geode.cache.query.QueryException), Retained (org.apache.geode.internal.offheap.annotations.Retained), StoredObject (org.apache.geode.internal.offheap.StoredObject), HAEventWrapper (org.apache.geode.internal.cache.tier.sockets.HAEventWrapper)
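
Setting the HAEventWrapper handling aside, the body of initialImagePut is built around a common entry-insertion retry loop: putEntryIfAbsent a fresh entry, and if an old entry comes back, lock it and check isRemovedPhase2; a removed entry is purged from the map and the insert retried, while a live one is updated in place. The GiiPutSketch class below is hypothetical and keeps only that loop, dropping version tags, tombstones, LRU callbacks and index maintenance.

import java.util.concurrent.ConcurrentHashMap;

public class GiiPutSketch {

    static class RegionEntry {
        volatile boolean removedPhase2;
        volatile Object value;
    }

    private final ConcurrentHashMap<Object, RegionEntry> map = new ConcurrentHashMap<>();

    /** Installs newValue for key, tolerating concurrently removed old entries. */
    public boolean initialImagePut(Object key, Object newValue) {
        RegionEntry newRe = new RegionEntry();
        synchronized (newRe) {
            RegionEntry oldRe = map.putIfAbsent(key, newRe);
            while (oldRe != null) {
                synchronized (oldRe) {
                    if (oldRe.removedPhase2) {
                        // Stale entry: purge it and retry the insert with our new entry.
                        map.remove(key, oldRe);
                        oldRe = map.putIfAbsent(key, newRe);
                    } else {
                        // Live entry: update it in place under its own lock.
                        oldRe.value = newValue;
                        return true;
                    }
                }
            }
            // Our new entry won the race; initialize it while still holding its lock.
            newRe.value = newValue;
            return true;
        }
    }
}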

Aggregations

HAEventWrapper (org.apache.geode.internal.cache.tier.sockets.HAEventWrapper): 10
ClientUpdateMessageImpl (org.apache.geode.internal.cache.tier.sockets.ClientUpdateMessageImpl): 4
IOException (java.io.IOException): 3
Iterator (java.util.Iterator): 3
AtomicLong (java.util.concurrent.atomic.AtomicLong): 3
TimeoutException (org.apache.geode.cache.TimeoutException): 3
Conflatable (org.apache.geode.internal.cache.Conflatable): 3
ArrayList (java.util.ArrayList): 2
ConcurrentModificationException (java.util.ConcurrentModificationException): 2
LinkedList (java.util.LinkedList): 2
List (java.util.List): 2
NoSuchElementException (java.util.NoSuchElementException): 2
CancelException (org.apache.geode.CancelException): 2
InternalGemFireException (org.apache.geode.InternalGemFireException): 2
CacheException (org.apache.geode.cache.CacheException): 2
CacheWriterException (org.apache.geode.cache.CacheWriterException): 2
EntryNotFoundException (org.apache.geode.cache.EntryNotFoundException): 2
RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException): 2
HAContainerWrapper (org.apache.geode.internal.cache.ha.HAContainerWrapper): 2
ClientUpdateMessage (org.apache.geode.internal.cache.tier.sockets.ClientUpdateMessage): 2