Example 91 with RegionDestroyedException

Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.

From the class Oplog, method recoverValuesIfNeeded:

/**
   * This method is called by the async value recovery task to recover the values from the crf
   * (the oplog's create record file) if the keys were already recovered from the krf (the key
   * record file).
   */
public void recoverValuesIfNeeded(Map<Long, DiskRecoveryStore> diskRecoveryStores) {
    // Early out if we start closing the parent.
    if (getParent().isClosing()) {
        return;
    }
    List<KRFEntry> sortedLiveEntries;
    HashMap<Long, DiskRegionInfo> targetRegions = new HashMap<Long, DiskRegionInfo>(this.regionMap);
    synchronized (diskRecoveryStores) {
        Iterator<DiskRecoveryStore> itr = diskRecoveryStores.values().iterator();
        while (itr.hasNext()) {
            DiskRecoveryStore store = itr.next();
            if (isLruValueRecoveryDisabled(store) || store.lruLimitExceeded()) {
                itr.remove();
            }
        }
        // Get a sorted list of live entries from the target regions
        targetRegions.keySet().retainAll(diskRecoveryStores.keySet());
    }
    sortedLiveEntries = getSortedLiveEntries(targetRegions.values());
    if (sortedLiveEntries == null) {
        // There are no live entries in this oplog to recover.
        return;
    }
    final ByteArrayDataInput in = new ByteArrayDataInput();
    for (KRFEntry entry : sortedLiveEntries) {
        // Early out if we start closing the parent.
        if (getParent().isClosing()) {
            return;
        }
        DiskEntry diskEntry = entry.getDiskEntry();
        DiskRegionView diskRegionView = entry.getDiskRegionView();
        long diskRegionId = diskRegionView.getId();
        synchronized (diskRecoveryStores) {
            DiskRecoveryStore diskRecoveryStore = diskRecoveryStores.get(diskRegionId);
            if (diskRecoveryStore == null) {
                continue;
            }
            // Reset the disk region view because it may have changed
            // due to the region being created.
            diskRegionView = diskRecoveryStore.getDiskRegionView();
            if (diskRegionView == null) {
                continue;
            }
            if (diskRecoveryStore.lruLimitExceeded()) {
                diskRecoveryStores.remove(diskRegionId);
                continue;
            }
            if (diskRegionView.isEntriesMapIncompatible()) {
                // Refetch the disk entry because it may have changed due to copying
                // an incompatible region map
                diskEntry = (DiskEntry) diskRecoveryStore.getRegionMap().getEntryInVM(diskEntry.getKey());
                if (diskEntry == null) {
                    continue;
                }
            }
            synchronized (diskEntry) {
                // Make sure the entry hasn't been modified
                if (diskEntry.getDiskId() != null && diskEntry.getDiskId().getOplogId() == oplogId) {
                    try {
                        DiskEntry.Helper.recoverValue(diskEntry, getOplogId(), diskRecoveryStore, in);
                    } catch (RegionDestroyedException ignore) {
                        // This region has been destroyed, stop recovering from it.
                        diskRecoveryStores.remove(diskRegionId);
                    }
                }
            }
        }
    }
}
Also used : Long2ObjectOpenHashMap(it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap) Int2ObjectOpenHashMap(it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) ByteArrayDataInput(org.apache.geode.internal.ByteArrayDataInput) DiskRegionView(org.apache.geode.internal.cache.persistence.DiskRegionView) DiskRecoveryStore(org.apache.geode.internal.cache.persistence.DiskRecoveryStore) AtomicLong(java.util.concurrent.atomic.AtomicLong)
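
The noteworthy shape in Example 91 is the per-region failure handling: a RegionDestroyedException aborts recovery only for the region that threw it, while the loop keeps recovering values for every other region. Below is a minimal, self-contained sketch of that shape; RecoveryStore, recoverValue, and recoverAll are hypothetical stand-ins for the Oplog internals, and only RegionDestroyedException is the real Geode class.

import java.util.HashMap;
import java.util.Map;

import org.apache.geode.cache.RegionDestroyedException;

public class PerRegionRecoverySketch {

    // Hypothetical stand-in for the DiskRecoveryStore used by Oplog.
    interface RecoveryStore {
        // May throw RegionDestroyedException if the backing region is gone.
        void recoverValue(Object key);
    }

    // Recover one value per store; a destroyed region removes only that
    // store from the map instead of aborting the whole recovery pass.
    static void recoverAll(Map<Long, RecoveryStore> stores, Object key) {
        // Iterate over a snapshot of the ids so removal is safe.
        for (Long regionId : new HashMap<>(stores).keySet()) {
            RecoveryStore store = stores.get(regionId);
            if (store == null) {
                continue; // already removed by an earlier failure
            }
            try {
                store.recoverValue(key);
            } catch (RegionDestroyedException ignore) {
                // This region has been destroyed; stop recovering from it.
                stores.remove(regionId);
            }
        }
    }
}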

Example 92 with RegionDestroyedException

Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.

From the class PutAllPRMessage, method doLocalPutAll:

/**
   * This method is called both by operateOnPartitionedRegion() when processing a remote msg and by
   * sendMsgByBucket() when processing a msg targeted to the local JVM. Note: it is very important
   * that this message does NOT cause any deadlocks, as the sender will wait indefinitely for the
   * acknowledgment.
   * 
   * @param r the partitioned region
   * @param eventSender the endpoint server that received the request from the client
   * @param lastModified timestamp of the last modification
   * @return true if the operation succeeds; otherwise an exception is thrown
   */
@edu.umd.cs.findbugs.annotations.SuppressWarnings("IMSE_DONT_CATCH_IMSE")
public boolean doLocalPutAll(PartitionedRegion r, InternalDistributedMember eventSender, long lastModified) throws EntryExistsException, ForceReattemptException, DataLocationException {
    boolean didPut = false;
    long clientReadTimeOut = PoolFactory.DEFAULT_READ_TIMEOUT;
    if (r.hasServerProxy()) {
        clientReadTimeOut = r.getServerProxy().getPool().getReadTimeout();
        if (logger.isDebugEnabled()) {
            logger.debug("PutAllPRMessage: doLocalPutAll: clientReadTimeOut is {}", clientReadTimeOut);
        }
    }
    DistributedPutAllOperation dpao = null;
    @Released EntryEventImpl baseEvent = null;
    BucketRegion bucketRegion = null;
    PartitionedRegionDataStore ds = r.getDataStore();
    InternalDistributedMember myId = r.getDistributionManager().getDistributionManagerId();
    try {
        if (!notificationOnly) {
            // bucketRegion is not null only when !notificationOnly
            bucketRegion = ds.getInitializedBucketForId(null, bucketId);
            this.versions = new VersionedObjectList(this.putAllPRDataSize, true, bucketRegion.getAttributes().getConcurrencyChecksEnabled());
            // create a base event and a DPAO for the PutAllMessage distributed between redundant buckets
            baseEvent = EntryEventImpl.create(bucketRegion, Operation.PUTALL_CREATE, null, null, this.callbackArg, true, eventSender, !skipCallbacks, true);
            // set baseEventId to the first entry's event id. We need the thread id for DACE
            baseEvent.setEventId(putAllPRData[0].getEventID());
            if (this.bridgeContext != null) {
                baseEvent.setContext(this.bridgeContext);
            }
            baseEvent.setPossibleDuplicate(this.posDup);
            if (logger.isDebugEnabled()) {
                logger.debug("PutAllPRMessage.doLocalPutAll: eventSender is {}, baseEvent is {}, msg is {}", eventSender, baseEvent, this);
            }
            dpao = new DistributedPutAllOperation(baseEvent, putAllPRDataSize, false);
        }
        // Fix the updateMsg misorder issue
        // Lock the keys when doing postPutAll
        Object[] keys = new Object[putAllPRDataSize];
        for (int i = 0; i < putAllPRDataSize; ++i) {
            keys[i] = putAllPRData[i].getKey();
        }
        if (!notificationOnly) {
            try {
                if (putAllPRData.length > 0) {
                    if (this.posDup && bucketRegion.getConcurrencyChecksEnabled()) {
                        if (logger.isDebugEnabled()) {
                            logger.debug("attempting to locate version tags for retried event");
                        }
                        // this may be a retried operation; look for the version tags recorded by
                        // the previous attempt
                        for (int i = 0; i < putAllPRDataSize; i++) {
                            if (putAllPRData[i].versionTag == null) {
                                putAllPRData[i].versionTag = bucketRegion.findVersionTagForClientBulkOp(putAllPRData[i].getEventID());
                                if (putAllPRData[i].versionTag != null) {
                                    putAllPRData[i].versionTag.replaceNullIDs(bucketRegion.getVersionMember());
                                }
                            }
                        }
                    }
                    EventID eventID = putAllPRData[0].getEventID();
                    ThreadIdentifier membershipID = new ThreadIdentifier(eventID.getMembershipID(), eventID.getThreadID());
                    bucketRegion.recordBulkOpStart(membershipID, eventID);
                }
                bucketRegion.waitUntilLocked(keys);
                boolean lockedForPrimary = false;
                final HashMap succeeded = new HashMap();
                PutAllPartialResult partialKeys = new PutAllPartialResult(putAllPRDataSize);
                Object key = keys[0];
                try {
                    bucketRegion.doLockForPrimary(false);
                    lockedForPrimary = true;
                    /*
                     * The real work to be synchronized; it can take a long time. We don't need to
                     * worry about another thread sending a msg with the same key as this request,
                     * because such requests will be blocked by foundKey.
                     */
                    for (int i = 0; i < putAllPRDataSize; i++) {
                        @Released EntryEventImpl ev = getEventFromEntry(r, myId, eventSender, i, putAllPRData, notificationOnly, bridgeContext, posDup, skipCallbacks);
                        try {
                            key = ev.getKey();
                            ev.setPutAllOperation(dpao);
                            // make sure a local update inserts a CachedDeserializable
                            ev.makeSerializedNewValue();
                            // then in basicPutPart3(), the ev is added into dpao
                            try {
                                didPut = r.getDataView().putEntryOnRemote(ev, false, false, null, false, lastModified, true);
                                if (didPut && logger.isDebugEnabled()) {
                                    logger.debug("PutAllPRMessage.doLocalPutAll:putLocally success for {}", ev);
                                }
                            } catch (ConcurrentCacheModificationException e) {
                                didPut = true;
                                if (logger.isDebugEnabled()) {
                                    logger.debug("PutAllPRMessage.doLocalPutAll:putLocally encountered concurrent cache modification for {}", ev, e);
                                }
                            }
                            putAllPRData[i].setTailKey(ev.getTailKey());
                            if (!didPut) {
                                // make sure the region hasn't gone away
                                r.checkReadiness();
                                ForceReattemptException fre = new ForceReattemptException("unable to perform put in PutAllPR, but operation should not fail");
                                fre.setHash(ev.getKey().hashCode());
                                throw fre;
                            } else {
                                succeeded.put(putAllPRData[i].getKey(), putAllPRData[i].getValue());
                                this.versions.addKeyAndVersion(putAllPRData[i].getKey(), ev.getVersionTag());
                            }
                        } finally {
                            ev.release();
                        }
                    } // for
                } catch (IllegalMonitorStateException ignore) {
                    throw new ForceReattemptException("unable to get lock for primary, retrying... ");
                } catch (CacheWriterException cwe) {
                    // a CacheWriter threw; record the failed key
                    partialKeys.saveFailedKey(key, cwe);
                } finally {
                    try {
                        // Only PutAllPRMessage knows if the thread id is fake. Event has no idea.
                        // So we have to manually set useFakeEventId for this DPAO
                        dpao.setUseFakeEventId(true);
                        r.checkReadiness();
                        bucketRegion.getDataView().postPutAll(dpao, this.versions, bucketRegion);
                    } finally {
                        if (lockedForPrimary) {
                            bucketRegion.doUnlockForPrimary();
                        }
                    }
                }
                if (partialKeys.hasFailure()) {
                    partialKeys.addKeysAndVersions(this.versions);
                    if (logger.isDebugEnabled()) {
                        logger.debug("PutAllPRMessage: partial keys applied, map to bucket {}'s keys: {}. Applied {}", bucketId, Arrays.toString(keys), succeeded);
                    }
                    throw new PutAllPartialResultException(partialKeys);
                }
            } catch (RegionDestroyedException e) {
                ds.checkRegionDestroyedOnBucket(bucketRegion, true, e);
            } finally {
                bucketRegion.removeAndNotifyKeys(keys);
            }
        } else {
            for (int i = 0; i < putAllPRDataSize; i++) {
                EntryEventImpl ev = getEventFromEntry(r, myId, eventSender, i, putAllPRData, notificationOnly, bridgeContext, posDup, skipCallbacks);
                try {
                    ev.setOriginRemote(true);
                    if (this.callbackArg != null) {
                        ev.setCallbackArgument(this.callbackArg);
                    }
                    r.invokePutCallbacks(ev.getOperation().isCreate() ? EnumListenerEvent.AFTER_CREATE : EnumListenerEvent.AFTER_UPDATE, ev, r.isInitialized(), true);
                } finally {
                    ev.release();
                }
            }
        }
    } finally {
        if (baseEvent != null)
            baseEvent.release();
        if (dpao != null)
            dpao.freeOffHeapResources();
    }
    return true;
}
Also used : Released(org.apache.geode.internal.offheap.annotations.Released) EntryEventImpl(org.apache.geode.internal.cache.EntryEventImpl) HashMap(java.util.HashMap) ThreadIdentifier(org.apache.geode.internal.cache.ha.ThreadIdentifier) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) VersionedObjectList(org.apache.geode.internal.cache.tier.sockets.VersionedObjectList) DistributedPutAllOperation(org.apache.geode.internal.cache.DistributedPutAllOperation) PartitionedRegionDataStore(org.apache.geode.internal.cache.PartitionedRegionDataStore) PutAllPartialResult(org.apache.geode.internal.cache.PutAllPartialResultException.PutAllPartialResult) ConcurrentCacheModificationException(org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException) PutAllPartialResultException(org.apache.geode.internal.cache.PutAllPartialResultException) ForceReattemptException(org.apache.geode.internal.cache.ForceReattemptException) BucketRegion(org.apache.geode.internal.cache.BucketRegion) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) EventID(org.apache.geode.internal.cache.EventID) CacheWriterException(org.apache.geode.cache.CacheWriterException)
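
From the caller's side, the contract in Example 92 is that a RegionDestroyedException is translated (via checkRegionDestroyedOnBucket) into an exception telling the sender whether retrying makes sense, while ForceReattemptException explicitly invites a retry. Below is a rough sketch of a retrying caller under that contract; the doBulkPut helper and the retry budget are hypothetical, and only Region, putAll, and the two exception types are real Geode API.

import java.util.Map;

import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionDestroyedException;
import org.apache.geode.internal.cache.ForceReattemptException;

public class BulkPutRetrySketch {

    // Hypothetical helper standing in for the message send path; declared to
    // throw ForceReattemptException the way the PR messaging layer does.
    static void doBulkPut(Region<String, String> region, Map<String, String> batch)
            throws ForceReattemptException {
        region.putAll(batch);
    }

    static boolean putWithRetry(Region<String, String> region, Map<String, String> batch) {
        final int maxAttempts = 3; // hypothetical retry budget
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                doBulkPut(region, batch);
                return true;
            } catch (ForceReattemptException retriable) {
                // Primary moved or a lock was unavailable; safe to try again.
            } catch (RegionDestroyedException gone) {
                // The target region no longer exists; retrying cannot help.
                return false;
            }
        }
        return false;
    }
}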

Example 93 with RegionDestroyedException

Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.

From the class RemoveAllPRMessage, method doLocalRemoveAll:

/**
   * This method is called both by operateOnPartitionedRegion() when processing a remote msg and by
   * sendMsgByBucket() when processing a msg targeted to the local JVM. Note: it is very important
   * that this message does NOT cause any deadlocks, as the sender will wait indefinitely for the
   * acknowledgment.
   * 
   * @param r the partitioned region
   * @param eventSender the endpoint server that received the request from the client
   * @param cacheWrite if true, invoke the CacheWriter before the destroy
   * @return true if the operation succeeds; otherwise an exception is thrown
   */
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IMSE_DONT_CATCH_IMSE")
public boolean doLocalRemoveAll(PartitionedRegion r, InternalDistributedMember eventSender, boolean cacheWrite) throws EntryExistsException, ForceReattemptException, DataLocationException {
    boolean didRemove = false;
    long clientReadTimeOut = PoolFactory.DEFAULT_READ_TIMEOUT;
    if (r.hasServerProxy()) {
        clientReadTimeOut = r.getServerProxy().getPool().getReadTimeout();
        if (logger.isDebugEnabled()) {
            logger.debug("RemoveAllPRMessage: doLocalRemoveAll: clientReadTimeOut is {}", clientReadTimeOut);
        }
    }
    DistributedRemoveAllOperation op = null;
    @Released EntryEventImpl baseEvent = null;
    BucketRegion bucketRegion = null;
    PartitionedRegionDataStore ds = r.getDataStore();
    InternalDistributedMember myId = r.getDistributionManager().getDistributionManagerId();
    try {
        if (!notificationOnly) {
            // bucketRegion is not null only when !notificationOnly
            bucketRegion = ds.getInitializedBucketForId(null, bucketId);
            this.versions = new VersionedObjectList(this.removeAllPRDataSize, true, bucketRegion.getAttributes().getConcurrencyChecksEnabled());
            // create a base event and an op for the RemoveAllMessage distributed between redundant buckets
            baseEvent = EntryEventImpl.create(bucketRegion, Operation.REMOVEALL_DESTROY, null, null, this.callbackArg, true, eventSender, !skipCallbacks, true);
            // set baseEventId to the first entry's event id. We need the thread id for DACE
            baseEvent.setEventId(removeAllPRData[0].getEventID());
            if (this.bridgeContext != null) {
                baseEvent.setContext(this.bridgeContext);
            }
            baseEvent.setPossibleDuplicate(this.posDup);
            if (logger.isDebugEnabled()) {
                logger.debug("RemoveAllPRMessage.doLocalRemoveAll: eventSender is {}, baseEvent is {}, msg is {}", eventSender, baseEvent, this);
            }
            op = new DistributedRemoveAllOperation(baseEvent, removeAllPRDataSize, false);
        }
        // Fix the updateMsg misorder issue
        // Lock the keys when doing postRemoveAll
        Object[] keys = new Object[removeAllPRDataSize];
        for (int i = 0; i < removeAllPRDataSize; ++i) {
            keys[i] = removeAllPRData[i].getKey();
        }
        if (!notificationOnly) {
            try {
                if (removeAllPRData.length > 0) {
                    if (this.posDup && bucketRegion.getConcurrencyChecksEnabled()) {
                        if (logger.isDebugEnabled()) {
                            logger.debug("attempting to locate version tags for retried event");
                        }
                        // this may be a retried operation; look for the version tags recorded by
                        // the previous attempt
                        for (int i = 0; i < removeAllPRDataSize; i++) {
                            if (removeAllPRData[i].versionTag == null) {
                                removeAllPRData[i].versionTag = bucketRegion.findVersionTagForClientBulkOp(removeAllPRData[i].getEventID());
                                if (removeAllPRData[i].versionTag != null) {
                                    removeAllPRData[i].versionTag.replaceNullIDs(bucketRegion.getVersionMember());
                                }
                            }
                        }
                    }
                    EventID eventID = removeAllPRData[0].getEventID();
                    ThreadIdentifier membershipID = new ThreadIdentifier(eventID.getMembershipID(), eventID.getThreadID());
                    bucketRegion.recordBulkOpStart(membershipID, eventID);
                }
                bucketRegion.waitUntilLocked(keys);
                boolean lockedForPrimary = false;
                final ArrayList<Object> succeeded = new ArrayList<Object>();
                PutAllPartialResult partialKeys = new PutAllPartialResult(removeAllPRDataSize);
                Object key = keys[0];
                try {
                    bucketRegion.doLockForPrimary(false);
                    lockedForPrimary = true;
                    /*
                     * The real work to be synchronized; it can take a long time. We don't need to
                     * worry about another thread sending a msg with the same key as this request,
                     * because such requests will be blocked by foundKey.
                     */
                    for (int i = 0; i < removeAllPRDataSize; i++) {
                        @Released EntryEventImpl ev = getEventFromEntry(r, myId, eventSender, i, removeAllPRData, notificationOnly, bridgeContext, posDup, skipCallbacks);
                        try {
                            key = ev.getKey();
                            ev.setRemoveAllOperation(op);
                            // then in basicPutPart3(), the ev is added into op
                            try {
                                r.getDataView().destroyOnRemote(ev, cacheWrite, null);
                                didRemove = true;
                                if (logger.isDebugEnabled()) {
                                    logger.debug("RemoveAllPRMessage.doLocalRemoveAll:removeLocally success for " + ev);
                                }
                            } catch (EntryNotFoundException ignore) {
                                didRemove = true;
                                if (ev.getVersionTag() == null) {
                                    if (logger.isDebugEnabled()) {
                                        logger.debug("doLocalRemoveAll:RemoveAll encoutered EntryNotFoundException: event={}", ev);
                                    }
                                }
                            } catch (ConcurrentCacheModificationException e) {
                                didRemove = true;
                                if (logger.isDebugEnabled()) {
                                    logger.debug("RemoveAllPRMessage.doLocalRemoveAll:removeLocally encountered concurrent cache modification for " + ev);
                                }
                            }
                            removeAllPRData[i].setTailKey(ev.getTailKey());
                            if (!didRemove) {
                                // make sure the region hasn't gone away
                                r.checkReadiness();
                                ForceReattemptException fre = new ForceReattemptException("unable to perform remove in RemoveAllPR, but operation should not fail");
                                fre.setHash(ev.getKey().hashCode());
                                throw fre;
                            } else {
                                succeeded.add(removeAllPRData[i].getKey());
                                this.versions.addKeyAndVersion(removeAllPRData[i].getKey(), ev.getVersionTag());
                            }
                        } finally {
                            ev.release();
                        }
                    } // for
                } catch (IllegalMonitorStateException ignore) {
                    throw new ForceReattemptException("unable to get lock for primary, retrying... ");
                } catch (CacheWriterException cwe) {
                    // a CacheWriter threw; record the failed key
                    partialKeys.saveFailedKey(key, cwe);
                } finally {
                    try {
                        // Only RemoveAllPRMessage knows if the thread id is fake. Event has no idea.
                        // So we have to manually set useFakeEventId for this op
                        op.setUseFakeEventId(true);
                        r.checkReadiness();
                        bucketRegion.getDataView().postRemoveAll(op, this.versions, bucketRegion);
                    } finally {
                        if (lockedForPrimary) {
                            bucketRegion.doUnlockForPrimary();
                        }
                    }
                }
                if (partialKeys.hasFailure()) {
                    partialKeys.addKeysAndVersions(this.versions);
                    if (logger.isDebugEnabled()) {
                        logger.debug("RemoveAllPRMessage: partial keys applied, map to bucket {}'s keys:{}. Applied {}", bucketId, Arrays.toString(keys), succeeded);
                    }
                    throw new PutAllPartialResultException(partialKeys);
                }
            } catch (RegionDestroyedException e) {
                ds.checkRegionDestroyedOnBucket(bucketRegion, true, e);
            } finally {
                bucketRegion.removeAndNotifyKeys(keys);
            }
        } else {
            for (int i = 0; i < removeAllPRDataSize; i++) {
                EntryEventImpl ev = getEventFromEntry(r, myId, eventSender, i, removeAllPRData, notificationOnly, bridgeContext, posDup, skipCallbacks);
                try {
                    ev.setOriginRemote(true);
                    if (this.callbackArg != null) {
                        ev.setCallbackArgument(this.callbackArg);
                    }
                    r.invokeDestroyCallbacks(EnumListenerEvent.AFTER_DESTROY, ev, r.isInitialized(), true);
                } finally {
                    ev.release();
                }
            }
        }
    } finally {
        if (baseEvent != null)
            baseEvent.release();
        if (op != null)
            op.freeOffHeapResources();
    }
    return true;
}
Also used : Released(org.apache.geode.internal.offheap.annotations.Released) EntryEventImpl(org.apache.geode.internal.cache.EntryEventImpl) ThreadIdentifier(org.apache.geode.internal.cache.ha.ThreadIdentifier) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) ArrayList(java.util.ArrayList) VersionedObjectList(org.apache.geode.internal.cache.tier.sockets.VersionedObjectList) PartitionedRegionDataStore(org.apache.geode.internal.cache.PartitionedRegionDataStore) PutAllPartialResult(org.apache.geode.internal.cache.PutAllPartialResultException.PutAllPartialResult) ConcurrentCacheModificationException(org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException) PutAllPartialResultException(org.apache.geode.internal.cache.PutAllPartialResultException) ForceReattemptException(org.apache.geode.internal.cache.ForceReattemptException) DistributedRemoveAllOperation(org.apache.geode.internal.cache.DistributedRemoveAllOperation) BucketRegion(org.apache.geode.internal.cache.BucketRegion) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) EventID(org.apache.geode.internal.cache.EventID) CacheWriterException(org.apache.geode.cache.CacheWriterException)
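
Example 93 shares Example 92's partial-failure pattern: a CacheWriterException stops the batch at the failing key, that key is recorded via saveFailedKey, post-processing still runs for the keys that succeeded, and only then is a PutAllPartialResultException thrown. Below is a generic sketch of that control flow; PartialFailureException, KeyOp, and applyBatch are hypothetical stand-ins for the internal Geode classes, and only CacheWriterException is the real type.

import java.util.ArrayList;
import java.util.List;

import org.apache.geode.cache.CacheWriterException;

public class PartialResultSketch {

    // Hypothetical analogue of Geode's PutAllPartialResultException.
    static class PartialFailureException extends RuntimeException {
        PartialFailureException(List<Object> succeeded, Object failedKey, Exception cause) {
            super("batch stopped at key " + failedKey + " after "
                + succeeded.size() + " successful keys", cause);
        }
    }

    interface KeyOp {
        void apply(Object key) throws CacheWriterException;
    }

    // Apply the op to each key in order; a CacheWriter veto stops the batch
    // at the failing key, but the partial result is surfaced only after the
    // post-processing step has run for the keys that succeeded.
    static List<Object> applyBatch(List<Object> keys, KeyOp op, Runnable postOp) {
        List<Object> succeeded = new ArrayList<>();
        Object failedKey = null;
        Exception failure = null;
        for (Object key : keys) {
            try {
                op.apply(key);
                succeeded.add(key);
            } catch (CacheWriterException cwe) {
                // Record the veto and stop the loop; do not fail yet.
                failedKey = key;
                failure = cwe;
                break;
            }
        }
        // Post-processing (e.g. distributing the succeeded keys) still runs.
        postOp.run();
        if (failure != null) {
            throw new PartialFailureException(succeeded, failedKey, failure);
        }
        return succeeded;
    }
}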

Example 94 with RegionDestroyedException

Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.

From the class RegionCreation, method create:

/**
   * Creates a {@link Region} with the given parent using the description provided by this
   * <code>RegionCreation</code>.
   *
   * @throws TimeoutException
   * @throws CacheWriterException
   * @throws RegionExistsException
   * @throws IllegalStateException
   */
void create(Region parent) throws TimeoutException, CacheWriterException, RegionExistsException {
    // Validate the attributes before creating the sub-region
    this.attrs.inheritAttributes(parent.getCache());
    this.attrs.prepareForValidation();
    this.attrs.setIndexes(this.indexes);
    Region me = null;
    try {
        me = parent.createSubregion(this.name, new AttributesFactory(this.attrs).create());
    } catch (RegionExistsException ex) {
        me = ex.getRegion();
        setMutableAttributes(me);
    } catch (RegionDestroyedException ex) {
        // Region was concurrently destroyed; log a warning and do nothing more.
        cache.getLoggerI18n().warning(LocalizedStrings.RegionCreation_REGION_DESTROYED_DURING_INITIALIZATION, this.name);
    }
    if (me != null) {
        // Register named region attributes
        String id = this.attrs.getId();
        if (id != null) {
            RegionAttributes realAttrs = me.getAttributes();
            me.getCache().setRegionAttributes(id, realAttrs);
        }
        fillIn(me);
    }
}
Also used : AttributesFactory(org.apache.geode.cache.AttributesFactory) RegionAttributes(org.apache.geode.cache.RegionAttributes) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) LocalRegion(org.apache.geode.internal.cache.LocalRegion) Region(org.apache.geode.cache.Region) RegionExistsException(org.apache.geode.cache.RegionExistsException)
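
Example 94 is defensive region creation: an existing region is reused via RegionExistsException.getRegion(), and a concurrent destroy is logged and swallowed. The same two races can be handled with the public RegionFactory API, as in the sketch below; the region name "example" and the null-on-destroy policy are illustrative choices, not Geode's own.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionDestroyedException;
import org.apache.geode.cache.RegionExistsException;
import org.apache.geode.cache.RegionShortcut;

public class CreateRegionSketch {

    static Region<String, String> getOrCreate(Cache cache, String name) {
        try {
            return cache.<String, String>createRegionFactory(RegionShortcut.REPLICATE)
                .create(name);
        } catch (RegionExistsException ex) {
            // Another thread created it first; reuse the existing region.
            @SuppressWarnings("unchecked")
            Region<String, String> existing = (Region<String, String>) ex.getRegion();
            return existing;
        } catch (RegionDestroyedException ex) {
            // A parent region was destroyed mid-initialization; give up quietly.
            return null;
        }
    }

    public static void main(String[] args) {
        Cache cache = new CacheFactory().create();
        Region<String, String> region = getOrCreate(cache, "example");
        if (region != null) {
            region.put("k", "v");
        }
        cache.close();
    }
}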

Example 95 with RegionDestroyedException

Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.

From the class PRQueryDUnitHelper, method getCacheSerializableRunnableForPRCountStarQueries:

public CacheSerializableRunnable getCacheSerializableRunnableForPRCountStarQueries(final String regionName, final String localRegion) {
    SerializableRunnable PrRegion = new CacheSerializableRunnable("PRCountStarQuery") {

        public void run2() throws CacheException {
            Cache cache = getCache();
            // Querying the localRegion and the PR region
            String[] queries = new String[] {
                "select COUNT(*) from /" + regionName,
                "select COUNT(*) from /" + regionName + " where ID > 0",
                "select COUNT(*) from /" + regionName + " where ID > 0 AND status='active'",
                "select COUNT(*) from /" + regionName + " where ID > 0 OR status='active'",
                "select COUNT(*) from /" + regionName + " where ID > 0 AND status LIKE 'act%'",
                "select COUNT(*) from /" + regionName + " where ID > 0 OR status LIKE 'ina%'",
                "select COUNT(*) from /" + regionName + " where ID IN SET(1, 2, 3, 4, 5)",
                "select COUNT(*) from /" + regionName + " where NOT (ID > 5)",
                "select DISTINCT COUNT(*) from /" + regionName + " where ID > 0",
                "select DISTINCT COUNT(*) from /" + regionName + " where ID > 0 AND status='active'",
                "select DISTINCT COUNT(*) from /" + regionName + " where ID > 0 OR status='active'",
                "select DISTINCT COUNT(*) from /" + regionName + " where ID > 0 AND status LIKE 'act%'",
                "select DISTINCT COUNT(*) from /" + regionName + " where ID > 0 OR status LIKE 'ina%'",
                "select DISTINCT COUNT(*) from /" + regionName + " where ID IN SET(1, 2, 3, 4, 5)",
                "select DISTINCT COUNT(*) from /" + regionName + " where NOT (ID > 5)",
                "select COUNT(*) from /" + regionName + " p, p.positions.values pos where p.ID > 0 AND pos.secId = 'IBM'",
                "select DISTINCT COUNT(*) from /" + regionName + " p, p.positions.values pos where p.ID > 0 AND pos.secId = 'IBM'",
                "select COUNT(*) from /" + regionName + " p, p.positions.values pos where p.ID > 0 AND pos.secId = 'IBM' LIMIT 5",
                "select DISTINCT COUNT(*) from /" + regionName + " p, p.positions.values pos where p.ID > 0 AND pos.secId = 'IBM' ORDER BY p.ID",
                "select COUNT(*) from /" + regionName + " p, p.positions.values pos where p.ID > 0 AND p.status = 'active' AND pos.secId = 'IBM'",
                "select COUNT(*) from /" + regionName + " p, p.positions.values pos where p.ID > 0 AND p.status = 'active' OR pos.secId = 'IBM'",
                "select COUNT(*) from /" + regionName + " p, p.positions.values pos where p.ID > 0 OR p.status = 'active' OR pos.secId = 'IBM'",
                "select COUNT(*) from /" + regionName + " p, p.positions.values pos where p.ID > 0 OR p.status = 'active' OR pos.secId = 'IBM' LIMIT 150" };
            Object[][] r = new Object[queries.length][2];
            Region region = cache.getRegion(regionName);
            assertNotNull(region);
            final String[] expectedExceptions = new String[] {
                RegionDestroyedException.class.getName(),
                ReplyException.class.getName(),
                CacheClosedException.class.getName(),
                ForceReattemptException.class.getName(),
                QueryInvocationTargetException.class.getName() };
            for (final String expectedException : expectedExceptions) {
                getCache().getLogger().info("<ExpectedException action=add>" + expectedException + "</ExpectedException>");
            }
            QueryService qs = getCache().getQueryService();
            Object[] params;
            try {
                for (int j = 0; j < queries.length; j++) {
                    String qStr = null;
                    synchronized (region) {
                        // Execute on PR region.
                        qStr = queries[j];
                        SelectResults sr = (SelectResults) qs.newQuery(qStr).execute();
                        r[j][0] = sr;
                        // Execute on local region.
                        qStr = queries[j];
                        SelectResults srr = (SelectResults) qs.newQuery(qStr.replace(regionName, localRegion)).execute();
                        r[j][1] = srr;
                    }
                }
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
                StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
                ssORrs.CompareCountStarQueryResultsWithoutAndWithIndexes(r, queries.length, true, queries);
            } catch (QueryInvocationTargetException e) {
                // a query exception reaching here is unexpected; fail the test
                throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
            } catch (QueryException e) {
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying" + e, e);
                throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
            } catch (RegionDestroyedException rde) {
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ", rde);
            } catch (CancelException cce) {
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ", cce);
            } finally {
                for (final String expectedException : expectedExceptions) {
                    getCache().getLogger().info("<ExpectedException action=remove>" + expectedException + "</ExpectedException>");
                }
            }
        }
    };
    return (CacheSerializableRunnable) PrRegion;
}
Also used : StructSetOrResultsSet(org.apache.geode.cache.query.functional.StructSetOrResultsSet) TestException(util.TestException) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) QueryInvocationTargetException(org.apache.geode.cache.query.QueryInvocationTargetException) QueryException(org.apache.geode.cache.query.QueryException) SelectResults(org.apache.geode.cache.query.SelectResults) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) QueryService(org.apache.geode.cache.query.QueryService) LocalRegion(org.apache.geode.internal.cache.LocalRegion) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) CancelException(org.apache.geode.CancelException) Cache(org.apache.geode.cache.Cache)
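
The core idea of Example 95 is that while a region is being destroyed concurrently, a RegionDestroyedException or CancelException raised by a query is an acceptable outcome rather than a test failure. Below is a compact sketch of that policy using the public QueryService API; the countRows helper and the -1 sentinel are hypothetical, and the query string is a placeholder.

import org.apache.geode.CancelException;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.RegionDestroyedException;
import org.apache.geode.cache.query.QueryException;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;

public class QueryDuringDestroySketch {

    // Returns the result size, or -1 if the region went away mid-query.
    static int countRows(Cache cache, String regionName) throws QueryException {
        QueryService qs = cache.getQueryService();
        try {
            SelectResults<?> results =
                (SelectResults<?>) qs.newQuery("select * from /" + regionName).execute();
            return results.size();
        } catch (RegionDestroyedException | CancelException expected) {
            // Concurrent destroy or cache shutdown; treat as an expected outcome.
            return -1;
        }
    }
}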

Aggregations

RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException): 124 usages
CancelException (org.apache.geode.CancelException): 41
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 37
Region (org.apache.geode.cache.Region): 35
PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion): 28
IOException (java.io.IOException): 25
Cache (org.apache.geode.cache.Cache): 20
CacheException (org.apache.geode.cache.CacheException): 19
QueryException (org.apache.geode.cache.query.QueryException): 16
QueryInvocationTargetException (org.apache.geode.cache.query.QueryInvocationTargetException): 16
ReplyException (org.apache.geode.distributed.internal.ReplyException): 16
CacheClosedException (org.apache.geode.cache.CacheClosedException): 14
EntryNotFoundException (org.apache.geode.cache.EntryNotFoundException): 14
SelectResults (org.apache.geode.cache.query.SelectResults): 13
EventID (org.apache.geode.internal.cache.EventID): 13
Test (org.junit.Test): 13
Iterator (java.util.Iterator): 12
TransactionDataNotColocatedException (org.apache.geode.cache.TransactionDataNotColocatedException): 12
CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable): 12
QueryService (org.apache.geode.cache.query.QueryService): 11