Example 36 with CacheWriterException

Use of org.apache.geode.cache.CacheWriterException in project geode by apache.

From class PartitionedRegion, method virtualPut:

@Override
boolean virtualPut(EntryEventImpl event, boolean ifNew, boolean ifOld, Object expectedOldValue, boolean requireOldValue, long lastModified, boolean overwriteDestroyed) throws TimeoutException, CacheWriterException {
    final long startTime = PartitionedRegionStats.startTime();
    boolean result = false;
    final DistributedPutAllOperation putAllOp_save = event.setPutAllOperation(null);
    if (event.getEventId() == null) {
        event.setNewEventId(this.cache.getDistributedSystem());
    }
    boolean bucketStorageAssigned = true;
    try {
        final Integer bucketId = event.getKeyInfo().getBucketId();
        assert bucketId != KeyInfo.UNKNOWN_BUCKET;
        // check in bucket2Node region
        InternalDistributedMember targetNode = getNodeForBucketWrite(bucketId, null);
        // and to optimize distribution.
        if (logger.isDebugEnabled()) {
            logger.debug("PR.virtualPut putting event={}", event);
        }
        if (targetNode == null) {
            try {
                bucketStorageAssigned = false;
                targetNode = createBucket(bucketId, event.getNewValSizeForPR(), null);
            } catch (PartitionedRegionStorageException e) {
                // try not to throw a PRSE if the cache is closing or this region was
                // destroyed during createBucket() (bug 36574)
                this.checkReadiness();
                if (this.cache.isClosed()) {
                    throw new RegionDestroyedException(toString(), getFullPath());
                }
                throw e;
            }
        }
        if (event.isBridgeEvent() && bucketStorageAssigned) {
            setNetworkHopType(bucketId, targetNode);
        }
        if (putAllOp_save == null) {
            result = putInBucket(targetNode, bucketId, event, ifNew, ifOld, expectedOldValue, requireOldValue, (ifNew ? 0L : lastModified));
            if (logger.isDebugEnabled()) {
                logger.debug("PR.virtualPut event={} ifNew={} ifOld={} result={}", event, ifNew, ifOld, result);
            }
        } else {
            // fix for 40502
            checkIfAboveThreshold(event);
            // putAll: save the bucket id into DPAO, then wait for postPutAll to send msg
            // at this time, DPAO's PutAllEntryData should be empty, we should add entry here with
            // bucket id
            // the message will be packed in postPutAll, include the one to local bucket, because the
            // buckets
            // could be changed at that time
            putAllOp_save.addEntry(event, bucketId);
            if (logger.isDebugEnabled()) {
                logger.debug("PR.virtualPut PutAll added event={} into bucket {}", event, bucketId);
            }
            result = true;
        }
    } catch (RegionDestroyedException rde) {
        if (!rde.getRegionFullPath().equals(getFullPath())) {
            throw new RegionDestroyedException(toString(), getFullPath(), rde);
        }
    } finally {
        if (putAllOp_save == null) {
            // only for normal put
            if (ifNew) {
                this.prStats.endCreate(startTime);
            } else {
                this.prStats.endPut(startTime);
            }
        }
    }
    if (!result) {
        checkReadiness();
        if (!ifNew && !ifOld && !this.concurrencyChecksEnabled) {
            // may fail due to concurrency conflict
            // failed for unknown reason
            // throw new PartitionedRegionStorageException("unable to execute operation");
            logger.warn(LocalizedMessage.create(LocalizedStrings.PartitionedRegion_PRVIRTUALPUT_RETURNING_FALSE_WHEN_IFNEW_AND_IFOLD_ARE_BOTH_FALSE), new Exception(LocalizedStrings.PartitionedRegion_STACK_TRACE.toLocalizedString()));
        }
    }
    return result;
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) PartitionedRegionStorageException(org.apache.geode.cache.PartitionedRegionStorageException) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) TimeoutException(org.apache.geode.cache.TimeoutException) IndexCreationException(org.apache.geode.cache.query.IndexCreationException) NameResolutionException(org.apache.geode.cache.query.NameResolutionException) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) InternalGemFireException(org.apache.geode.InternalGemFireException) QueryInvocationTargetException(org.apache.geode.cache.query.QueryInvocationTargetException) TransactionDataRebalancedException(org.apache.geode.cache.TransactionDataRebalancedException) LockServiceDestroyedException(org.apache.geode.distributed.LockServiceDestroyedException) GatewaySenderException(org.apache.geode.internal.cache.wan.GatewaySenderException) PartitionOfflineException(org.apache.geode.cache.persistence.PartitionOfflineException) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) GatewaySenderConfigurationException(org.apache.geode.internal.cache.wan.GatewaySenderConfigurationException) ExecutionException(java.util.concurrent.ExecutionException) ReplyException(org.apache.geode.distributed.internal.ReplyException) IndexNameConflictException(org.apache.geode.cache.query.IndexNameConflictException) TypeMismatchException(org.apache.geode.cache.query.TypeMismatchException) IndexExistsException(org.apache.geode.cache.query.IndexExistsException) FunctionDomainException(org.apache.geode.cache.query.FunctionDomainException) EntryExistsException(org.apache.geode.cache.EntryExistsException) PartitionedRegionDistributionException(org.apache.geode.cache.PartitionedRegionDistributionException) PartitionedRegionStorageException(org.apache.geode.cache.PartitionedRegionStorageException) FunctionException(org.apache.geode.cache.execute.FunctionException) CacheLoaderException(org.apache.geode.cache.CacheLoaderException) NoSuchElementException(java.util.NoSuchElementException) QueryException(org.apache.geode.cache.query.QueryException) PartitionNotAvailableException(org.apache.geode.cache.partition.PartitionNotAvailableException) LowMemoryException(org.apache.geode.cache.LowMemoryException) InternalFunctionInvocationTargetException(org.apache.geode.internal.cache.execute.InternalFunctionInvocationTargetException) IndexInvalidException(org.apache.geode.cache.query.IndexInvalidException) PRLocallyDestroyedException(org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException) RegionExistsException(org.apache.geode.cache.RegionExistsException) CancelException(org.apache.geode.CancelException) DiskAccessException(org.apache.geode.cache.DiskAccessException) CacheWriterException(org.apache.geode.cache.CacheWriterException) TransactionException(org.apache.geode.cache.TransactionException) CacheClosedException(org.apache.geode.cache.CacheClosedException) ConcurrentCacheModificationException(org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException) MultiIndexCreationException(org.apache.geode.cache.query.MultiIndexCreationException) TransactionDataNotColocatedException(org.apache.geode.cache.TransactionDataNotColocatedException) EmptyRegionFunctionException(org.apache.geode.cache.execute.EmptyRegionFunctionException)
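A caller of the public Region API sees the CacheWriterException that virtualPut declares when a registered cache writer vetoes the write. Below is a minimal, hypothetical single-member sketch; the region name, key, and mcast-port setting are illustrative and not taken from the Geode sources above:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.CacheWriterException;
import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.util.CacheWriterAdapter;

public class VetoedPutExample {
    public static void main(String[] args) {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        Region<String, String> region = cache
            .<String, String>createRegionFactory(RegionShortcut.PARTITION)
            // A writer that vetoes every create; the resulting CacheWriterException
            // propagates out of the partitioned-region put path to the caller.
            .setCacheWriter(new CacheWriterAdapter<String, String>() {
                @Override
                public void beforeCreate(EntryEvent<String, String> event) throws CacheWriterException {
                    throw new CacheWriterException("rejected key " + event.getKey());
                }
            })
            .create("example");
        try {
            region.put("k1", "v1");
        } catch (CacheWriterException expected) {
            // The put is vetoed before the entry is created.
            System.out.println("put rejected: " + expected.getMessage());
        } finally {
            cache.close();
        }
    }
}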

Example 37 with CacheWriterException

Use of org.apache.geode.cache.CacheWriterException in project geode by apache.

From class LocalRegion, method findObjectInSystem:

/**
   * 
   * Search for the value in a server (if one exists), then try a loader.
   * 
   * If we find a value, we put it in the cache.
   * 
   * @param preferCD return the CachedDeserializable, if that's what the value is.
   * @param requestingClient the client making the request, if any
   * @param clientEvent the client's event, if any. If not null, we set the version tag
   * @return the deserialized value
   */
protected Object findObjectInSystem(KeyInfo keyInfo, boolean isCreate, TXStateInterface tx, boolean generateCallbacks, Object localValue, boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones) throws CacheLoaderException, TimeoutException {
    final Object key = keyInfo.getKey();
    final Object aCallbackArgument = keyInfo.getCallbackArg();
    Object value = null;
    boolean fromServer = false;
    VersionTagHolder holder = null;
    /*
     * First lets try the server
     */
    ServerRegionProxy mySRP = getServerProxy();
    if (mySRP != null) {
        holder = new VersionTagHolder();
        value = mySRP.get(key, aCallbackArgument, holder);
        fromServer = value != null;
    }
    /*
     * If we didn't get anything from the server, try the loader
     */
    if (!fromServer || value == Token.TOMBSTONE) {
        // copy into local var to prevent race condition
        CacheLoader loader = basicGetLoader();
        if (loader != null) {
            final LoaderHelper loaderHelper = this.loaderHelperFactory.createLoaderHelper(key,
                aCallbackArgument, false /* netSearchAllowed */, true /* netloadAllowed */, null);
            CachePerfStats stats = getCachePerfStats();
            long statStart = stats.startLoad();
            try {
                value = loader.load(loaderHelper);
                fromServer = false;
            } finally {
                stats.endLoad(statStart);
            }
        }
    }
    // have concurrency checks enabled
    if (fromServer && value == Token.TOMBSTONE && !this.concurrencyChecksEnabled) {
        value = null;
    }
    /*
     * If we got a value back, let's put it in the cache.
     */
    RegionEntry re = null;
    if (value != null && !isMemoryThresholdReachedForLoad()) {
        long startPut = CachePerfStats.getStatTime();
        validateKey(key);
        Operation op;
        if (isCreate) {
            op = Operation.LOCAL_LOAD_CREATE;
        } else {
            op = Operation.LOCAL_LOAD_UPDATE;
        }
        @Released EntryEventImpl event = EntryEventImpl.create(this, op, key, value, aCallbackArgument, false, getMyId(), generateCallbacks);
        try {
            // already one there with the same version
            if (fromServer) {
                if (alreadyInvalid(key, event)) {
                    return null;
                }
                event.setFromServer(fromServer);
                event.setVersionTag(holder.getVersionTag());
                if (clientEvent != null) {
                    clientEvent.setVersionTag(holder.getVersionTag());
                }
            }
            // the value to the server
            if (!fromServer) {
                event.setNewEventId(this.cache.getDistributedSystem());
            }
            try {
                try {
                    re = basicPutEntry(event, 0L);
                    if (!fromServer && clientEvent != null) {
                        clientEvent.setVersionTag(event.getVersionTag());
                        clientEvent.isConcurrencyConflict(event.isConcurrencyConflict());
                    }
                    if (fromServer && event.getRawNewValue() == Token.TOMBSTONE) {
                        // tombstones are destroyed entries
                        return null;
                    }
                } catch (ConcurrentCacheModificationException ignore) {
                    // this means the value attempted to overwrite a newer modification and was rejected
                    if (logger.isDebugEnabled()) {
                        logger.debug("caught concurrent modification attempt when applying {}", event);
                    }
                    notifyBridgeClients(event);
                }
                if (!getDataView().isDeferredStats()) {
                    getCachePerfStats().endPut(startPut, event.isOriginRemote());
                }
            } catch (CacheWriterException cwe) {
                if (logger.isDebugEnabled()) {
                    logger.debug("findObjectInSystem: writer exception putting entry {}", event, cwe);
                }
            }
        } finally {
            event.release();
        }
    }
    if (isCreate) {
        recordMiss(re, key);
    }
    return value;
}
Also used : Released(org.apache.geode.internal.offheap.annotations.Released) Operation(org.apache.geode.cache.Operation) ConcurrentCacheModificationException(org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException) LoaderHelper(org.apache.geode.cache.LoaderHelper) ServerRegionProxy(org.apache.geode.cache.client.internal.ServerRegionProxy) StoredObject(org.apache.geode.internal.offheap.StoredObject) CacheLoader(org.apache.geode.cache.CacheLoader) CacheWriterException(org.apache.geode.cache.CacheWriterException)
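The loader branch of findObjectInSystem is where a user-supplied CacheLoader plugs in: on a local miss (and no value from a server), load() is invoked and the result is stored back into the region. A small hypothetical sketch of that behavior through the public API, assuming a plain replicated region with no server connection; names are illustrative:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.CacheLoader;
import org.apache.geode.cache.CacheLoaderException;
import org.apache.geode.cache.LoaderHelper;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class LoaderMissExample {
    public static void main(String[] args) {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        Region<String, String> region = cache
            .<String, String>createRegionFactory(RegionShortcut.REPLICATE)
            // On a miss, the region falls through to this loader and caches the result.
            .setCacheLoader(new CacheLoader<String, String>() {
                @Override
                public String load(LoaderHelper<String, String> helper) throws CacheLoaderException {
                    return "loaded-" + helper.getKey();
                }

                @Override
                public void close() {}
            })
            .create("example");
        // First get misses locally, invokes the loader, and stores the loaded value.
        System.out.println(region.get("k1"));
        // Second get is a local hit; the loader is not invoked again.
        System.out.println(region.get("k1"));
        cache.close();
    }
}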

Example 38 with CacheWriterException

Use of org.apache.geode.cache.CacheWriterException in project geode by apache.

From class PutAllPRMessage, method doLocalPutAll:

/**
   * This method is called either by operateOnPartitionedRegion() when processing a remote message,
   * or by sendMsgByBucket() when processing a message targeted to the local JVM (PartitionedRegion).
   * Note: it is very important that this message does NOT cause any deadlocks, as the sender will
   * wait indefinitely for the acknowledgment.
   * 
   * @param r the partitioned region
   * @param eventSender the endpoint server that received the request from the client
   * @param lastModified timestamp of the last modification
   * @return true if the operation succeeds; otherwise an exception is thrown
   */
@edu.umd.cs.findbugs.annotations.SuppressWarnings("IMSE_DONT_CATCH_IMSE")
public boolean doLocalPutAll(PartitionedRegion r, InternalDistributedMember eventSender, long lastModified) throws EntryExistsException, ForceReattemptException, DataLocationException {
    boolean didPut = false;
    long clientReadTimeOut = PoolFactory.DEFAULT_READ_TIMEOUT;
    if (r.hasServerProxy()) {
        clientReadTimeOut = r.getServerProxy().getPool().getReadTimeout();
        if (logger.isDebugEnabled()) {
            logger.debug("PutAllPRMessage: doLocalPutAll: clientReadTimeOut is {}", clientReadTimeOut);
        }
    }
    DistributedPutAllOperation dpao = null;
    @Released EntryEventImpl baseEvent = null;
    BucketRegion bucketRegion = null;
    PartitionedRegionDataStore ds = r.getDataStore();
    InternalDistributedMember myId = r.getDistributionManager().getDistributionManagerId();
    try {
        if (!notificationOnly) {
            // bucketRegion is not null only when !notificationOnly
            bucketRegion = ds.getInitializedBucketForId(null, bucketId);
            this.versions = new VersionedObjectList(this.putAllPRDataSize, true, bucketRegion.getAttributes().getConcurrencyChecksEnabled());
            // create a base event and a DPAO for PutAllMessage distributed btw redundant buckets
            baseEvent = EntryEventImpl.create(bucketRegion, Operation.PUTALL_CREATE, null, null, this.callbackArg, true, eventSender, !skipCallbacks, true);
            // set baseEventId to the first entry's event id. We need the thread id for DACE
            baseEvent.setEventId(putAllPRData[0].getEventID());
            if (this.bridgeContext != null) {
                baseEvent.setContext(this.bridgeContext);
            }
            baseEvent.setPossibleDuplicate(this.posDup);
            if (logger.isDebugEnabled()) {
                logger.debug("PutAllPRMessage.doLocalPutAll: eventSender is {}, baseEvent is {}, msg is {}", eventSender, baseEvent, this);
            }
            dpao = new DistributedPutAllOperation(baseEvent, putAllPRDataSize, false);
        }
        // Fix the updateMsg misorder issue
        // Lock the keys when doing postPutAll
        Object[] keys = new Object[putAllPRDataSize];
        for (int i = 0; i < putAllPRDataSize; ++i) {
            keys[i] = putAllPRData[i].getKey();
        }
        if (!notificationOnly) {
            try {
                if (putAllPRData.length > 0) {
                    if (this.posDup && bucketRegion.getConcurrencyChecksEnabled()) {
                        if (logger.isDebugEnabled()) {
                            logger.debug("attempting to locate version tags for retried event");
                        }
                        // of the previous attempt
                        for (int i = 0; i < putAllPRDataSize; i++) {
                            if (putAllPRData[i].versionTag == null) {
                                putAllPRData[i].versionTag = bucketRegion.findVersionTagForClientBulkOp(putAllPRData[i].getEventID());
                                if (putAllPRData[i].versionTag != null) {
                                    putAllPRData[i].versionTag.replaceNullIDs(bucketRegion.getVersionMember());
                                }
                            }
                        }
                    }
                    EventID eventID = putAllPRData[0].getEventID();
                    ThreadIdentifier membershipID = new ThreadIdentifier(eventID.getMembershipID(), eventID.getThreadID());
                    bucketRegion.recordBulkOpStart(membershipID, eventID);
                }
                bucketRegion.waitUntilLocked(keys);
                boolean lockedForPrimary = false;
                final HashMap succeeded = new HashMap();
                PutAllPartialResult partialKeys = new PutAllPartialResult(putAllPRDataSize);
                Object key = keys[0];
                try {
                    bucketRegion.doLockForPrimary(false);
                    lockedForPrimary = true;
                    /*
             * The real work to be synchronized, it will take long time. We don't worry about
             * another thread to send any msg which has the same key in this request, because these
             * request will be blocked by foundKey
             */
                    for (int i = 0; i < putAllPRDataSize; i++) {
                        @Released EntryEventImpl ev = getEventFromEntry(r, myId, eventSender, i, putAllPRData, notificationOnly, bridgeContext, posDup, skipCallbacks);
                        try {
                            key = ev.getKey();
                            ev.setPutAllOperation(dpao);
                            // make sure a local update inserts a cache de-serializable
                            ev.makeSerializedNewValue();
                            // then in basicPutPart3(), the ev is added into dpao
                            try {
                                didPut = r.getDataView().putEntryOnRemote(ev, false, false, null, false, lastModified, true);
                                if (didPut && logger.isDebugEnabled()) {
                                    logger.debug("PutAllPRMessage.doLocalPutAll:putLocally success for {}", ev);
                                }
                            } catch (ConcurrentCacheModificationException e) {
                                didPut = true;
                                if (logger.isDebugEnabled()) {
                                    logger.debug("PutAllPRMessage.doLocalPutAll:putLocally encountered concurrent cache modification for {}", ev, e);
                                }
                            }
                            putAllPRData[i].setTailKey(ev.getTailKey());
                            if (!didPut) {
                                // make sure the region hasn't gone away
                                r.checkReadiness();
                                ForceReattemptException fre = new ForceReattemptException("unable to perform put in PutAllPR, but operation should not fail");
                                fre.setHash(ev.getKey().hashCode());
                                throw fre;
                            } else {
                                succeeded.put(putAllPRData[i].getKey(), putAllPRData[i].getValue());
                                this.versions.addKeyAndVersion(putAllPRData[i].getKey(), ev.getVersionTag());
                            }
                        } finally {
                            ev.release();
                        }
                    }
                // for
                } catch (IllegalMonitorStateException ignore) {
                    throw new ForceReattemptException("unable to get lock for primary, retrying... ");
                } catch (CacheWriterException cwe) {
                    // encounter cacheWriter exception
                    partialKeys.saveFailedKey(key, cwe);
                } finally {
                    try {
                        // Only PutAllPRMessage knows if the thread id is fake. Event has no idea.
                        // So we have to manually set useFakeEventId for this DPAO
                        dpao.setUseFakeEventId(true);
                        r.checkReadiness();
                        bucketRegion.getDataView().postPutAll(dpao, this.versions, bucketRegion);
                    } finally {
                        if (lockedForPrimary) {
                            bucketRegion.doUnlockForPrimary();
                        }
                    }
                }
                if (partialKeys.hasFailure()) {
                    partialKeys.addKeysAndVersions(this.versions);
                    if (logger.isDebugEnabled()) {
                        logger.debug("PutAllPRMessage: partial keys applied, map to bucket {}'s keys: {}. Applied {}", bucketId, Arrays.toString(keys), succeeded);
                    }
                    throw new PutAllPartialResultException(partialKeys);
                }
            } catch (RegionDestroyedException e) {
                ds.checkRegionDestroyedOnBucket(bucketRegion, true, e);
            } finally {
                bucketRegion.removeAndNotifyKeys(keys);
            }
        } else {
            for (int i = 0; i < putAllPRDataSize; i++) {
                EntryEventImpl ev = getEventFromEntry(r, myId, eventSender, i, putAllPRData, notificationOnly, bridgeContext, posDup, skipCallbacks);
                try {
                    ev.setOriginRemote(true);
                    if (this.callbackArg != null) {
                        ev.setCallbackArgument(this.callbackArg);
                    }
                    r.invokePutCallbacks(ev.getOperation().isCreate() ? EnumListenerEvent.AFTER_CREATE : EnumListenerEvent.AFTER_UPDATE, ev, r.isInitialized(), true);
                } finally {
                    ev.release();
                }
            }
        }
    } finally {
        if (baseEvent != null)
            baseEvent.release();
        if (dpao != null)
            dpao.freeOffHeapResources();
    }
    return true;
}
Also used : Released(org.apache.geode.internal.offheap.annotations.Released) EntryEventImpl(org.apache.geode.internal.cache.EntryEventImpl) HashMap(java.util.HashMap) ThreadIdentifier(org.apache.geode.internal.cache.ha.ThreadIdentifier) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) VersionedObjectList(org.apache.geode.internal.cache.tier.sockets.VersionedObjectList) DistributedPutAllOperation(org.apache.geode.internal.cache.DistributedPutAllOperation) PartitionedRegionDataStore(org.apache.geode.internal.cache.PartitionedRegionDataStore) PutAllPartialResult(org.apache.geode.internal.cache.PutAllPartialResultException.PutAllPartialResult) ConcurrentCacheModificationException(org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException) PutAllPartialResultException(org.apache.geode.internal.cache.PutAllPartialResultException) ForceReattemptException(org.apache.geode.internal.cache.ForceReattemptException) BucketRegion(org.apache.geode.internal.cache.BucketRegion) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) EventID(org.apache.geode.internal.cache.EventID) CacheWriterException(org.apache.geode.cache.CacheWriterException)
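In doLocalPutAll, a CacheWriterException thrown while applying an entry is recorded as a failed key via PutAllPartialResult.saveFailedKey rather than being silently dropped. Below is a hypothetical client-side sketch of triggering that path, assuming the cache writer fires for bulk puts in this configuration; the region name and keys are illustrative, and the exact exception type that surfaces from putAll (the CacheWriterException itself or a wrapper) can depend on the region type and topology:

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.CacheWriterException;
import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.util.CacheWriterAdapter;

public class PutAllVetoExample {
    public static void main(String[] args) {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        Region<String, String> region = cache
            .<String, String>createRegionFactory(RegionShortcut.PARTITION)
            // Veto a single key so the bulk operation reports a partial failure.
            .setCacheWriter(new CacheWriterAdapter<String, String>() {
                @Override
                public void beforeCreate(EntryEvent<String, String> event) throws CacheWriterException {
                    if ("bad".equals(event.getKey())) {
                        throw new CacheWriterException("vetoed " + event.getKey());
                    }
                }
            })
            .create("example");

        Map<String, String> batch = new LinkedHashMap<>();
        batch.put("a", "1");
        batch.put("bad", "2");
        batch.put("c", "3");
        try {
            region.putAll(batch);
        } catch (RuntimeException e) {
            // The vetoed key is reported back to the caller instead of being ignored.
            System.out.println("putAll failed: " + e.getMessage());
        } finally {
            cache.close();
        }
    }
}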

Example 39 with CacheWriterException

Use of org.apache.geode.cache.CacheWriterException in project geode by apache.

From class RemoveAllPRMessage, method doLocalRemoveAll:

/**
   * This method is called either by operateOnPartitionedRegion() when processing a remote message,
   * or by sendMsgByBucket() when processing a message targeted to the local JVM (PartitionedRegion).
   * Note: it is very important that this message does NOT cause any deadlocks, as the sender will
   * wait indefinitely for the acknowledgment.
   * 
   * @param r the partitioned region
   * @param eventSender the endpoint server that received the request from the client
   * @param cacheWrite if true, invoke the cacheWriter before the destroy
   * @return true if the operation succeeds; otherwise an exception is thrown
   */
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IMSE_DONT_CATCH_IMSE")
public boolean doLocalRemoveAll(PartitionedRegion r, InternalDistributedMember eventSender, boolean cacheWrite) throws EntryExistsException, ForceReattemptException, DataLocationException {
    boolean didRemove = false;
    long clientReadTimeOut = PoolFactory.DEFAULT_READ_TIMEOUT;
    if (r.hasServerProxy()) {
        clientReadTimeOut = r.getServerProxy().getPool().getReadTimeout();
        if (logger.isDebugEnabled()) {
            logger.debug("RemoveAllPRMessage: doLocalRemoveAll: clientReadTimeOut is {}", clientReadTimeOut);
        }
    }
    DistributedRemoveAllOperation op = null;
    @Released EntryEventImpl baseEvent = null;
    BucketRegion bucketRegion = null;
    PartitionedRegionDataStore ds = r.getDataStore();
    InternalDistributedMember myId = r.getDistributionManager().getDistributionManagerId();
    try {
        if (!notificationOnly) {
            // bucketRegion is not null only when !notificationOnly
            bucketRegion = ds.getInitializedBucketForId(null, bucketId);
            this.versions = new VersionedObjectList(this.removeAllPRDataSize, true, bucketRegion.getAttributes().getConcurrencyChecksEnabled());
            // create a base event and a DPAO for RemoveAllMessage distributed btw redundant buckets
            baseEvent = EntryEventImpl.create(bucketRegion, Operation.REMOVEALL_DESTROY, null, null, this.callbackArg, true, eventSender, !skipCallbacks, true);
            // set baseEventId to the first entry's event id. We need the thread id for DACE
            baseEvent.setEventId(removeAllPRData[0].getEventID());
            if (this.bridgeContext != null) {
                baseEvent.setContext(this.bridgeContext);
            }
            baseEvent.setPossibleDuplicate(this.posDup);
            if (logger.isDebugEnabled()) {
                logger.debug("RemoveAllPRMessage.doLocalRemoveAll: eventSender is {}, baseEvent is {}, msg is {}", eventSender, baseEvent, this);
            }
            op = new DistributedRemoveAllOperation(baseEvent, removeAllPRDataSize, false);
        }
        // Fix the updateMsg misorder issue
        // Lock the keys when doing postRemoveAll
        Object[] keys = new Object[removeAllPRDataSize];
        for (int i = 0; i < removeAllPRDataSize; ++i) {
            keys[i] = removeAllPRData[i].getKey();
        }
        if (!notificationOnly) {
            try {
                if (removeAllPRData.length > 0) {
                    if (this.posDup && bucketRegion.getConcurrencyChecksEnabled()) {
                        if (logger.isDebugEnabled()) {
                            logger.debug("attempting to locate version tags for retried event");
                        }
                        // of the previous attempt
                        for (int i = 0; i < removeAllPRDataSize; i++) {
                            if (removeAllPRData[i].versionTag == null) {
                                removeAllPRData[i].versionTag = bucketRegion.findVersionTagForClientBulkOp(removeAllPRData[i].getEventID());
                                if (removeAllPRData[i].versionTag != null) {
                                    removeAllPRData[i].versionTag.replaceNullIDs(bucketRegion.getVersionMember());
                                }
                            }
                        }
                    }
                    EventID eventID = removeAllPRData[0].getEventID();
                    ThreadIdentifier membershipID = new ThreadIdentifier(eventID.getMembershipID(), eventID.getThreadID());
                    bucketRegion.recordBulkOpStart(membershipID, eventID);
                }
                bucketRegion.waitUntilLocked(keys);
                boolean lockedForPrimary = false;
                final ArrayList<Object> succeeded = new ArrayList<Object>();
                PutAllPartialResult partialKeys = new PutAllPartialResult(removeAllPRDataSize);
                Object key = keys[0];
                try {
                    bucketRegion.doLockForPrimary(false);
                    lockedForPrimary = true;
                    /*
             * The real work to be synchronized, it will take long time. We don't worry about
             * another thread to send any msg which has the same key in this request, because these
             * request will be blocked by foundKey
             */
                    for (int i = 0; i < removeAllPRDataSize; i++) {
                        @Released EntryEventImpl ev = getEventFromEntry(r, myId, eventSender, i, removeAllPRData, notificationOnly, bridgeContext, posDup, skipCallbacks);
                        try {
                            key = ev.getKey();
                            ev.setRemoveAllOperation(op);
                            // then in basicPutPart3(), the ev is added into op
                            try {
                                r.getDataView().destroyOnRemote(ev, cacheWrite, null);
                                didRemove = true;
                                if (logger.isDebugEnabled()) {
                                    logger.debug("RemoveAllPRMessage.doLocalRemoveAll:removeLocally success for " + ev);
                                }
                            } catch (EntryNotFoundException ignore) {
                                didRemove = true;
                                if (ev.getVersionTag() == null) {
                                    if (logger.isDebugEnabled()) {
                                        logger.debug("doLocalRemoveAll:RemoveAll encoutered EntryNotFoundException: event={}", ev);
                                    }
                                }
                            } catch (ConcurrentCacheModificationException e) {
                                didRemove = true;
                                if (logger.isDebugEnabled()) {
                                    logger.debug("RemoveAllPRMessage.doLocalRemoveAll:removeLocally encountered concurrent cache modification for " + ev);
                                }
                            }
                            removeAllPRData[i].setTailKey(ev.getTailKey());
                            if (!didRemove) {
                                // make sure the region hasn't gone away
                                r.checkReadiness();
                                ForceReattemptException fre = new ForceReattemptException("unable to perform remove in RemoveAllPR, but operation should not fail");
                                fre.setHash(ev.getKey().hashCode());
                                throw fre;
                            } else {
                                succeeded.add(removeAllPRData[i].getKey());
                                this.versions.addKeyAndVersion(removeAllPRData[i].getKey(), ev.getVersionTag());
                            }
                        } finally {
                            ev.release();
                        }
                    }
                // for
                } catch (IllegalMonitorStateException ex) {
                    ForceReattemptException fre = new ForceReattemptException("unable to get lock for primary, retrying... ");
                    throw fre;
                } catch (CacheWriterException cwe) {
                    // encounter cacheWriter exception
                    partialKeys.saveFailedKey(key, cwe);
                } finally {
                    try {
                        // Only RemoveAllPRMessage knows if the thread id is fake. Event has no idea.
                        // So we have to manually set useFakeEventId for this op
                        op.setUseFakeEventId(true);
                        r.checkReadiness();
                        bucketRegion.getDataView().postRemoveAll(op, this.versions, bucketRegion);
                    } finally {
                        if (lockedForPrimary) {
                            bucketRegion.doUnlockForPrimary();
                        }
                    }
                }
                if (partialKeys.hasFailure()) {
                    partialKeys.addKeysAndVersions(this.versions);
                    if (logger.isDebugEnabled()) {
                        logger.debug("RemoveAllPRMessage: partial keys applied, map to bucket {}'s keys:{}. Applied {}", bucketId, Arrays.toString(keys), succeeded);
                    }
                    throw new PutAllPartialResultException(partialKeys);
                }
            } catch (RegionDestroyedException e) {
                ds.checkRegionDestroyedOnBucket(bucketRegion, true, e);
            } finally {
                bucketRegion.removeAndNotifyKeys(keys);
            }
        } else {
            for (int i = 0; i < removeAllPRDataSize; i++) {
                EntryEventImpl ev = getEventFromEntry(r, myId, eventSender, i, removeAllPRData, notificationOnly, bridgeContext, posDup, skipCallbacks);
                try {
                    ev.setOriginRemote(true);
                    if (this.callbackArg != null) {
                        ev.setCallbackArgument(this.callbackArg);
                    }
                    r.invokeDestroyCallbacks(EnumListenerEvent.AFTER_DESTROY, ev, r.isInitialized(), true);
                } finally {
                    ev.release();
                }
            }
        }
    } finally {
        if (baseEvent != null)
            baseEvent.release();
        if (op != null)
            op.freeOffHeapResources();
    }
    return true;
}
Also used : Released(org.apache.geode.internal.offheap.annotations.Released) EntryEventImpl(org.apache.geode.internal.cache.EntryEventImpl) ThreadIdentifier(org.apache.geode.internal.cache.ha.ThreadIdentifier) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) ArrayList(java.util.ArrayList) VersionedObjectList(org.apache.geode.internal.cache.tier.sockets.VersionedObjectList) PartitionedRegionDataStore(org.apache.geode.internal.cache.PartitionedRegionDataStore) PutAllPartialResult(org.apache.geode.internal.cache.PutAllPartialResultException.PutAllPartialResult) ConcurrentCacheModificationException(org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException) PutAllPartialResultException(org.apache.geode.internal.cache.PutAllPartialResultException) ForceReattemptException(org.apache.geode.internal.cache.ForceReattemptException) DistributedRemoveAllOperation(org.apache.geode.internal.cache.DistributedRemoveAllOperation) BucketRegion(org.apache.geode.internal.cache.BucketRegion) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) EventID(org.apache.geode.internal.cache.EventID) CacheWriterException(org.apache.geode.cache.CacheWriterException)
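doLocalRemoveAll handles a writer veto the same way: the CacheWriterException for the failing key is saved as a partial-result failure while the rest of the batch is accounted for. A hypothetical sketch using the public removeAll API, assuming the cacheWrite path (and thus beforeDestroy) is taken for this configuration; names and the broad catch are illustrative:

import java.util.Arrays;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.CacheWriterException;
import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.util.CacheWriterAdapter;

public class RemoveAllVetoExample {
    public static void main(String[] args) {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        Region<String, String> region = cache
            .<String, String>createRegionFactory(RegionShortcut.PARTITION)
            // Veto destroys of one key so the bulk remove reports a failure for it.
            .setCacheWriter(new CacheWriterAdapter<String, String>() {
                @Override
                public void beforeDestroy(EntryEvent<String, String> event) throws CacheWriterException {
                    if ("locked".equals(event.getKey())) {
                        throw new CacheWriterException("destroy vetoed for " + event.getKey());
                    }
                }
            })
            .create("example");

        region.put("a", "1");
        region.put("locked", "2");
        try {
            region.removeAll(Arrays.asList("a", "locked"));
        } catch (RuntimeException e) {
            System.out.println("removeAll failed: " + e.getMessage());
        } finally {
            cache.close();
        }
    }
}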

Example 40 with CacheWriterException

Use of org.apache.geode.cache.CacheWriterException in project geode by apache.

From class ProxyDUnitTest, method remoteOriginOps:

/**
   * check remote ops done in a normal vm are correctly distributed to PROXY regions
   */
private void remoteOriginOps(DataPolicy dp, InterestPolicy ip) throws CacheException {
    initOtherId();
    AttributesFactory af = new AttributesFactory();
    af.setDataPolicy(dp);
    af.setSubscriptionAttributes(new SubscriptionAttributes(ip));
    af.setScope(Scope.DISTRIBUTED_ACK);
    CacheListener cl1 = new CacheListener() {

        public void afterUpdate(EntryEvent e) {
            clLastEvent = e;
            clInvokeCount++;
        }

        public void afterCreate(EntryEvent e) {
            clLastEvent = e;
            clInvokeCount++;
        }

        public void afterInvalidate(EntryEvent e) {
            clLastEvent = e;
            clInvokeCount++;
        }

        public void afterDestroy(EntryEvent e) {
            clLastEvent = e;
            clInvokeCount++;
        }

        public void afterRegionInvalidate(RegionEvent e) {
            clLastEvent = e;
            clInvokeCount++;
        }

        public void afterRegionDestroy(RegionEvent e) {
            clLastEvent = e;
            clInvokeCount++;
        }

        public void afterRegionClear(RegionEvent e) {
            clLastEvent = e;
            clInvokeCount++;
        }

        public void afterRegionCreate(RegionEvent e) {
        }

        public void afterRegionLive(RegionEvent e) {
        }

        public void close() {
        }
    };
    af.addCacheListener(cl1);
    Region r = createRootRegion("ProxyDUnitTest", af.create());
    this.clInvokeCount = 0;
    doCreateOtherVm();
    DMStats stats = getDMStats();
    long receivedMsgs = stats.getReceivedMessages();
    if (ip.isAll()) {
        getOtherVm().invoke(new CacheSerializableRunnable("do put") {

            public void run2() throws CacheException {
                Region r = getRootRegion("ProxyDUnitTest");
                r.put("p", "v");
            }
        });
        assertEquals(1, this.clInvokeCount);
        assertEquals(Operation.CREATE, this.clLastEvent.getOperation());
        assertEquals(true, this.clLastEvent.isOriginRemote());
        assertEquals(this.otherId, this.clLastEvent.getDistributedMember());
        assertEquals(null, ((EntryEvent) this.clLastEvent).getOldValue());
        // failure
        assertEquals(false, ((EntryEvent) this.clLastEvent).isOldValueAvailable());
        assertEquals("v", ((EntryEvent) this.clLastEvent).getNewValue());
        assertEquals("p", ((EntryEvent) this.clLastEvent).getKey());
        this.clInvokeCount = 0;
        getOtherVm().invoke(new CacheSerializableRunnable("do create") {

            public void run2() throws CacheException {
                Region r = getRootRegion("ProxyDUnitTest");
                r.create("c", "v");
            }
        });
        assertEquals(1, this.clInvokeCount);
        assertEquals(Operation.CREATE, this.clLastEvent.getOperation());
        assertEquals(true, this.clLastEvent.isOriginRemote());
        assertEquals(this.otherId, this.clLastEvent.getDistributedMember());
        assertEquals(null, ((EntryEvent) this.clLastEvent).getOldValue());
        assertEquals(false, ((EntryEvent) this.clLastEvent).isOldValueAvailable());
        assertEquals("v", ((EntryEvent) this.clLastEvent).getNewValue());
        assertEquals("c", ((EntryEvent) this.clLastEvent).getKey());
        this.clInvokeCount = 0;
        getOtherVm().invoke(new CacheSerializableRunnable("do update") {

            public void run2() throws CacheException {
                Region r = getRootRegion("ProxyDUnitTest");
                r.put("c", "v2");
            }
        });
        assertEquals(1, this.clInvokeCount);
        assertEquals(Operation.UPDATE, this.clLastEvent.getOperation());
        assertEquals(true, this.clLastEvent.isOriginRemote());
        assertEquals(this.otherId, this.clLastEvent.getDistributedMember());
        assertEquals(null, ((EntryEvent) this.clLastEvent).getOldValue());
        assertEquals(false, ((EntryEvent) this.clLastEvent).isOldValueAvailable());
        assertEquals("v2", ((EntryEvent) this.clLastEvent).getNewValue());
        assertEquals("c", ((EntryEvent) this.clLastEvent).getKey());
        this.clInvokeCount = 0;
        getOtherVm().invoke(new CacheSerializableRunnable("do invalidate") {

            public void run2() throws CacheException {
                Region r = getRootRegion("ProxyDUnitTest");
                r.invalidate("c");
            }
        });
        assertEquals(1, this.clInvokeCount);
        assertEquals(Operation.INVALIDATE, this.clLastEvent.getOperation());
        assertEquals(true, this.clLastEvent.isOriginRemote());
        assertEquals(this.otherId, this.clLastEvent.getDistributedMember());
        assertEquals(null, ((EntryEvent) this.clLastEvent).getOldValue());
        assertEquals(false, ((EntryEvent) this.clLastEvent).isOldValueAvailable());
        assertEquals(null, ((EntryEvent) this.clLastEvent).getNewValue());
        assertEquals("c", ((EntryEvent) this.clLastEvent).getKey());
        this.clInvokeCount = 0;
        getOtherVm().invoke(new CacheSerializableRunnable("do destroy") {

            public void run2() throws CacheException {
                Region r = getRootRegion("ProxyDUnitTest");
                r.destroy("c");
            }
        });
        assertEquals(1, this.clInvokeCount);
        assertEquals(Operation.DESTROY, this.clLastEvent.getOperation());
        assertEquals(true, this.clLastEvent.isOriginRemote());
        assertEquals(this.otherId, this.clLastEvent.getDistributedMember());
        assertEquals(null, ((EntryEvent) this.clLastEvent).getOldValue());
        assertEquals(false, ((EntryEvent) this.clLastEvent).isOldValueAvailable());
        assertEquals(null, ((EntryEvent) this.clLastEvent).getNewValue());
        assertEquals("c", ((EntryEvent) this.clLastEvent).getKey());
        this.clInvokeCount = 0;
        getOtherVm().invoke(new CacheSerializableRunnable("do putAll") {

            public void run2() throws CacheException {
                Region r = getRootRegion("ProxyDUnitTest");
                Map m = new HashMap();
                m.put("putAllKey1", "putAllValue1");
                m.put("putAllKey2", "putAllValue2");
                r.putAll(m);
            }
        });
        assertEquals(2, this.clInvokeCount);
        // @todo darrel; check putAll events
        this.clInvokeCount = 0;
        getOtherVm().invoke(new CacheSerializableRunnable("do netsearch") {

            public void run2() throws CacheException {
                Region r = getRootRegion("ProxyDUnitTest");
                // total miss
                assertEquals(null, r.get("loadkey"));
            }
        });
        assertEquals(0, this.clInvokeCount);
    } else {
        getOtherVm().invoke(new CacheSerializableRunnable("do entry ops") {

            public void run2() throws CacheException {
                Region r = getRootRegion("ProxyDUnitTest");
                r.put("p", "v");
                r.create("c", "v");
                // update
                r.put("c", "v");
                r.invalidate("c");
                r.destroy("c");
                {
                    Map m = new HashMap();
                    m.put("putAllKey1", "putAllValue1");
                    m.put("putAllKey2", "putAllValue2");
                    r.putAll(m);
                }
                // total miss
                assertEquals(null, r.get("loadkey"));
            }
        });
        assertEquals(0, this.clInvokeCount);
        assertEquals(0, r.size());
        // check the stats to make sure none of the above sent up messages
        assertEquals(receivedMsgs, stats.getReceivedMessages());
    }
    {
        AttributesMutator am = r.getAttributesMutator();
        CacheLoader cl = new CacheLoader() {

            public Object load(LoaderHelper helper) throws CacheLoaderException {
                if (helper.getKey().equals("loadkey")) {
                    return "loadvalue";
                } else if (helper.getKey().equals("loadexception")) {
                    throw new CacheLoaderException("expected");
                } else {
                    return null;
                }
            }

            public void close() {
            }
        };
        am.setCacheLoader(cl);
    }
    receivedMsgs = stats.getReceivedMessages();
    getOtherVm().invoke(new CacheSerializableRunnable("check net loader") {

        public void run2() throws CacheException {
            Region r = getRootRegion("ProxyDUnitTest");
            // net load
            assertEquals("loadvalue", r.get("loadkey"));
            // total miss
            assertEquals(null, r.get("foobar"));
            try {
                r.get("loadexception");
                fail("expected CacheLoaderException");
            } catch (CacheLoaderException expected) {
            }
        }
    });
    assertTrue(stats.getReceivedMessages() > receivedMsgs);
    if (ip.isAll()) {
        assertEquals(1, this.clInvokeCount);
        assertEquals(Operation.NET_LOAD_CREATE, this.clLastEvent.getOperation());
        assertEquals(true, this.clLastEvent.isOriginRemote());
        assertEquals(this.otherId, this.clLastEvent.getDistributedMember());
        assertEquals(null, ((EntryEvent) this.clLastEvent).getOldValue());
        assertEquals(false, ((EntryEvent) this.clLastEvent).isOldValueAvailable());
        this.clInvokeCount = 0;
    } else {
        assertEquals(0, this.clInvokeCount);
    }
    {
        AttributesMutator am = r.getAttributesMutator();
        am.setCacheLoader(null);
        CacheWriter cw = new CacheWriterAdapter() {

            public void beforeCreate(EntryEvent event) throws CacheWriterException {
                throw new CacheWriterException("expected");
            }
        };
        am.setCacheWriter(cw);
    }
    receivedMsgs = stats.getReceivedMessages();
    getOtherVm().invoke(new CacheSerializableRunnable("check net write") {

        public void run2() throws CacheException {
            Region r = getRootRegion("ProxyDUnitTest");
            try {
                r.put("putkey", "putvalue");
                fail("expected CacheWriterException");
            } catch (CacheWriterException expected) {
            }
        }
    });
    assertTrue(stats.getReceivedMessages() > receivedMsgs);
    {
        AttributesMutator am = r.getAttributesMutator();
        am.setCacheWriter(null);
    }
    assertEquals(0, this.clInvokeCount);
    this.clLastEvent = null;
    getOtherVm().invoke(new CacheSerializableRunnable("check region invalidate") {

        public void run2() throws CacheException {
            Region r = getRootRegion("ProxyDUnitTest");
            r.invalidateRegion();
        }
    });
    assertEquals(1, this.clInvokeCount);
    assertEquals(Operation.REGION_INVALIDATE, this.clLastEvent.getOperation());
    assertEquals(true, this.clLastEvent.isOriginRemote());
    assertEquals(this.otherId, this.clLastEvent.getDistributedMember());
    this.clLastEvent = null;
    getOtherVm().invoke(new CacheSerializableRunnable("check region clear") {

        public void run2() throws CacheException {
            Region r = getRootRegion("ProxyDUnitTest");
            r.clear();
        }
    });
    assertEquals(2, this.clInvokeCount);
    assertEquals(Operation.REGION_CLEAR, this.clLastEvent.getOperation());
    assertEquals(true, this.clLastEvent.isOriginRemote());
    assertEquals(this.otherId, this.clLastEvent.getDistributedMember());
    this.clLastEvent = null;
    getOtherVm().invoke(new CacheSerializableRunnable("check region destroy") {

        public void run2() throws CacheException {
            Region r = getRootRegion("ProxyDUnitTest");
            r.destroyRegion();
        }
    });
    assertEquals(3, this.clInvokeCount);
    assertEquals(Operation.REGION_DESTROY, this.clLastEvent.getOperation());
    assertEquals(true, this.clLastEvent.isOriginRemote());
    assertEquals(this.otherId, this.clLastEvent.getDistributedMember());
    assertTrue(r.isDestroyed());
}
Also used : CacheWriterAdapter(org.apache.geode.cache.util.CacheWriterAdapter) DMStats(org.apache.geode.distributed.internal.DMStats) CacheException(org.apache.geode.cache.CacheException) HashMap(java.util.HashMap) RegionEvent(org.apache.geode.cache.RegionEvent) CacheListener(org.apache.geode.cache.CacheListener) LoaderHelper(org.apache.geode.cache.LoaderHelper) AttributesFactory(org.apache.geode.cache.AttributesFactory) CacheLoaderException(org.apache.geode.cache.CacheLoaderException) EntryEvent(org.apache.geode.cache.EntryEvent) CacheWriter(org.apache.geode.cache.CacheWriter) Region(org.apache.geode.cache.Region) CacheLoader(org.apache.geode.cache.CacheLoader) HashMap(java.util.HashMap) Map(java.util.Map) SubscriptionAttributes(org.apache.geode.cache.SubscriptionAttributes) AttributesMutator(org.apache.geode.cache.AttributesMutator) CacheWriterException(org.apache.geode.cache.CacheWriterException)
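Outside the DUnit harness, the pattern the test exercises, installing and later removing a vetoing CacheWriter through an AttributesMutator, looks roughly like this single-member sketch; the region name and property settings are illustrative:

import org.apache.geode.cache.AttributesMutator;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.CacheWriterException;
import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.util.CacheWriterAdapter;

public class MutatorWriterExample {
    public static void main(String[] args) {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        Region<String, String> region =
            cache.<String, String>createRegionFactory(RegionShortcut.REPLICATE).create("ProxyExample");

        // Install a writer after region creation, as the test does with an AttributesMutator.
        AttributesMutator<String, String> mutator = region.getAttributesMutator();
        mutator.setCacheWriter(new CacheWriterAdapter<String, String>() {
            @Override
            public void beforeCreate(EntryEvent<String, String> event) throws CacheWriterException {
                throw new CacheWriterException("expected");
            }
        });

        try {
            region.put("putkey", "putvalue");
        } catch (CacheWriterException expected) {
            System.out.println("writer vetoed the put: " + expected.getMessage());
        }

        // Remove the writer; the same put now succeeds.
        mutator.setCacheWriter(null);
        region.put("putkey", "putvalue");
        cache.close();
    }
}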

Aggregations

CacheWriterException (org.apache.geode.cache.CacheWriterException): 50
Region (org.apache.geode.cache.Region): 22
Test (org.junit.Test): 14
EntryEvent (org.apache.geode.cache.EntryEvent): 13
Released (org.apache.geode.internal.offheap.annotations.Released): 12
AttributesFactory (org.apache.geode.cache.AttributesFactory): 11
CacheException (org.apache.geode.cache.CacheException): 11
TimeoutException (org.apache.geode.cache.TimeoutException): 11
InternalDistributedMember (org.apache.geode.distributed.internal.membership.InternalDistributedMember): 10
CacheWriterAdapter (org.apache.geode.cache.util.CacheWriterAdapter): 9
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 9
EntryNotFoundException (org.apache.geode.cache.EntryNotFoundException): 8
VM (org.apache.geode.test.dunit.VM): 8
Cache (org.apache.geode.cache.Cache): 7
RegionEvent (org.apache.geode.cache.RegionEvent): 7
Host (org.apache.geode.test.dunit.Host): 7
IOException (java.io.IOException): 6
CacheWriter (org.apache.geode.cache.CacheWriter): 6
EntryEventImpl (org.apache.geode.internal.cache.EntryEventImpl): 6
InternalGemFireError (org.apache.geode.InternalGemFireError): 5