Example 86 with CacheClosedException

Use of org.apache.geode.cache.CacheClosedException in project geode by apache.

The class DurableClientInfoResponse, method create.

/**
   * Returns a {@code DurableClientInfoResponse} that will be returned to the specified recipient.
   */
public static DurableClientInfoResponse create(DistributionManager dm, InternalDistributedMember recipient, DurableClientInfoRequest request) {
    DurableClientInfoResponse m = new DurableClientInfoResponse();
    m.setRecipient(recipient);
    try {
        InternalCache c = (InternalCache) CacheFactory.getInstanceCloseOk(dm.getSystem());
        if (!c.getCacheServers().isEmpty()) {
            CacheServerImpl server = (CacheServerImpl) c.getCacheServers().iterator().next();
            switch(request.action) {
                case DurableClientInfoRequest.HAS_DURABLE_CLIENT_REQUEST:
                    {
                        m.returnVal = server.getAcceptor().getCacheClientNotifier().hasDurableClient(request.durableId);
                        break;
                    }
                case DurableClientInfoRequest.IS_PRIMARY_FOR_DURABLE_CLIENT_REQUEST:
                    {
                        m.returnVal = server.getAcceptor().getCacheClientNotifier().hasPrimaryForDurableClient(request.durableId);
                        break;
                    }
            }
        }
    } catch (CacheClosedException ignore) {
    // do nothing
    }
    return m;
}
Also used : InternalCache(org.apache.geode.internal.cache.InternalCache) CacheServerImpl(org.apache.geode.internal.cache.CacheServerImpl) CacheClosedException(org.apache.geode.cache.CacheClosedException)
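
The point of this example is that a closing cache is treated as a non-event: the lookup goes through CacheFactory.getInstanceCloseOk and any CacheClosedException is simply swallowed, so the response is returned with its default value. Below is a minimal, self-contained sketch of the same idiom using the public CacheFactory.getAnyInstance() lookup; the CacheLookup class and openCacheOrEmpty method are illustrative names, not Geode APIs.

import java.util.Optional;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheClosedException;
import org.apache.geode.cache.CacheFactory;

public final class CacheLookup {

    private CacheLookup() {
    }

    /**
     * Returns the cache if one is open, or an empty Optional if the cache has been
     * closed, mirroring the "catch and ignore CacheClosedException" idiom above.
     */
    public static Optional<Cache> openCacheOrEmpty() {
        try {
            return Optional.of(CacheFactory.getAnyInstance());
        } catch (CacheClosedException ignore) {
            // The cache is closed or shutting down; treat it as "nothing to report".
            return Optional.empty();
        }
    }
}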

Example 87 with CacheClosedException

Use of org.apache.geode.cache.CacheClosedException in project geode by apache.

The class PRHARedundancyProvider, method createBucketAtomically.

/**
   * Creates bucket atomically by creating all the copies to satisfy redundancy. In case all copies
   * can not be created, a PartitionedRegionStorageException is thrown to the user and
   * BucketBackupMessage is sent to the nodes to make copies of a bucket that was only partially
   * created. Other VMs are informed of bucket creation through updates through their
   * {@link BucketAdvisor.BucketProfile}s.
   * 
   * <p>
   * This method is synchronized to enforce a single threaded ordering, allowing for a more accurate
   * picture of bucket distribution in the face of concurrency. See bug 37275.
   * </p>
   * 
   * This method is now slightly misnamed. Another member could be in the process of creating this
   * same bucket at the same time.
   * 
   * @param bucketId Id of the bucket to be created.
   * @param newBucketSize size of the first entry.
   * @param startTime a time stamp prior to calling the method, used to update bucket creation stats
   * @return the primary member for the newly created bucket
   * @throws PartitionedRegionStorageException if required # of buckets can not be created to
   *         satisfy redundancy.
   * @throws PartitionedRegionException if d-lock can not be acquired to create bucket.
   * @throws PartitionOfflineException if persistent data recovery is not complete for a partitioned
   *         region referred to in the query.
   */
public InternalDistributedMember createBucketAtomically(final int bucketId, final int newBucketSize, final long startTime, final boolean finishIncompleteCreation, String partitionName) throws PartitionedRegionStorageException, PartitionedRegionException, PartitionOfflineException {
    final boolean isDebugEnabled = logger.isDebugEnabled();
    prRegion.checkPROffline();
    // If there are insufficient stores throw *before* we try acquiring the
    // (very expensive) bucket lock or the (somewhat expensive) monitor on this
    earlySufficientStoresCheck(partitionName);
    synchronized (this) {
        if (this.prRegion.getCache().isCacheAtShutdownAll()) {
            throw new CacheClosedException("Cache is shutting down");
        }
        if (isDebugEnabled) {
            logger.debug("Starting atomic creation of bucketId={}", this.prRegion.bucketStringForLogs(bucketId));
        }
        // ArrayList<DataBucketStores>
        Collection<InternalDistributedMember> acceptedMembers = new ArrayList<InternalDistributedMember>();
        Set<InternalDistributedMember> excludedMembers = new HashSet<InternalDistributedMember>();
        ArrayListWithClearState<InternalDistributedMember> failedMembers = new ArrayListWithClearState<InternalDistributedMember>();
        final long timeOut = System.currentTimeMillis() + computeTimeout();
        BucketMembershipObserver observer = null;
        boolean needToElectPrimary = true;
        InternalDistributedMember bucketPrimary = null;
        try {
            this.prRegion.checkReadiness();
            Bucket toCreate = this.prRegion.getRegionAdvisor().getBucket(bucketId);
            if (!finishIncompleteCreation) {
                bucketPrimary = this.prRegion.getBucketPrimary(bucketId);
                if (bucketPrimary != null) {
                    if (isDebugEnabled) {
                        logger.debug("during atomic creation, discovered that the primary already exists {} returning early", bucketPrimary);
                    }
                    needToElectPrimary = false;
                    return bucketPrimary;
                }
            }
            observer = new BucketMembershipObserver(toCreate).beginMonitoring();
            // track if insufficient data stores have been detected
            boolean loggedInsufficentStores = false;
            for (; ; ) {
                this.prRegion.checkReadiness();
                if (this.prRegion.getCache().isCacheAtShutdownAll()) {
                    if (isDebugEnabled) {
                        logger.debug("Aborted createBucketAtomically due to ShutdownAll");
                    }
                    throw new CacheClosedException("Cache is shutting down");
                }
                // this.prRegion.getCache().getLogger().config(
                // "DEBUG createBucketAtomically: "
                // + " bucketId=" + this.prRegion.getBucketName(bucketId) +
                // " accepted: " + acceptedMembers +
                // " failed: " + failedMembers);
                long timeLeft = timeOut - System.currentTimeMillis();
                if (timeLeft < 0) {
                    // It took too long.
                    timedOut(this.prRegion, getAllStores(partitionName), acceptedMembers, ALLOCATE_ENOUGH_MEMBERS_TO_HOST_BUCKET.toLocalizedString(), computeTimeout());
                // NOTREACHED
                }
                if (isDebugEnabled) {
                    logger.debug("createBucketAtomically: have {} ms left to finish this", timeLeft);
                }
                // Always go back to the advisor, see if any fresh data stores are
                // present.
                Set<InternalDistributedMember> allStores = getAllStores(partitionName);
                loggedInsufficentStores = checkSufficientStores(allStores, loggedInsufficentStores);
                InternalDistributedMember candidate = createBucketInstance(bucketId, newBucketSize, excludedMembers, acceptedMembers, failedMembers, timeOut, allStores);
                if (candidate != null) {
                    if (this.prRegion.getDistributionManager().enforceUniqueZone()) {
                        // enforceUniqueZone property has no effect for a loner. Fix for defect #47181
                        if (!(this.prRegion.getDistributionManager() instanceof LonerDistributionManager)) {
                            Set<InternalDistributedMember> exm = getBuddyMembersInZone(candidate, allStores);
                            exm.remove(candidate);
                            exm.removeAll(acceptedMembers);
                            excludedMembers.addAll(exm);
                        } else {
                            // log a warning if Loner
                            logger.warn(LocalizedMessage.create(LocalizedStrings.GemFireCache_ENFORCE_UNIQUE_HOST_NOT_APPLICABLE_FOR_LONER));
                        }
                    }
                }
                // Get an updated list of bucket owners, which should include
                // buckets created concurrently with this createBucketAtomically call
                acceptedMembers = prRegion.getRegionAdvisor().getBucketOwners(bucketId);
                if (isDebugEnabled) {
                    logger.debug("Accepted members: {}", acceptedMembers);
                }
                // the candidate has accepted
                if (bucketPrimary == null && acceptedMembers.contains(candidate)) {
                    bucketPrimary = candidate;
                }
                // prune out the stores that have left
                verifyBucketNodes(excludedMembers, partitionName);
                // Note - we used to wait for the created bucket to become primary here
                // if this is a colocated region. We no longer need to do that, because
                // the EndBucketMessage is sent out after bucket creation completes to
                // select the primary.
                // Have we exhausted all candidates?
                final int potentialCandidateCount = (allStores.size() - (excludedMembers.size() + acceptedMembers.size() + failedMembers.size()));
                // Determining exhausted members competes with bucket balancing; it's
                // important to re-visit all failed members since "failed" set may
                // contain datastores which at the moment are imbalanced, but yet could
                // be candidates. If the failed members list is empty, it's expected
                // that the next iteration clears the (already empty) list.
                final boolean exhaustedPotentialCandidates = failedMembers.wasCleared() && potentialCandidateCount <= 0;
                final boolean redundancySatisfied = acceptedMembers.size() > this.prRegion.getRedundantCopies();
                final boolean bucketNotCreated = acceptedMembers.size() == 0;
                if (isDebugEnabled) {
                    logger.debug("potentialCandidateCount={}, exhaustedPotentialCandidates={}, redundancySatisfied={}, bucketNotCreated={}", potentialCandidateCount, exhaustedPotentialCandidates, redundancySatisfied, bucketNotCreated);
                }
                if (bucketNotCreated) {
                    // if we haven't managed to create the bucket on any nodes, retry.
                    continue;
                }
                if (exhaustedPotentialCandidates && !redundancySatisfied) {
                    insufficientStores(allStores, acceptedMembers, true);
                }
                // Fix for bug 39283
                if (redundancySatisfied || exhaustedPotentialCandidates) {
                    // Tell one of the members to become primary.
                    // The rest of the members will be allowed to
                    // volunteer for primary.
                    endBucketCreation(bucketId, acceptedMembers, bucketPrimary, partitionName);
                    final int expectedRemoteHosts = acceptedMembers.size() - (acceptedMembers.contains(this.prRegion.getMyId()) ? 1 : 0);
                    boolean interrupted = Thread.interrupted();
                    try {
                        BucketMembershipObserverResults results = observer.waitForOwnersGetPrimary(expectedRemoteHosts, acceptedMembers, partitionName);
                        if (results.problematicDeparture) {
                            // BZZZT! Member left. Start over.
                            continue;
                        }
                        bucketPrimary = results.primary;
                    } catch (InterruptedException e) {
                        interrupted = true;
                        this.prRegion.getCancelCriterion().checkCancelInProgress(e);
                    } finally {
                        if (interrupted) {
                            Thread.currentThread().interrupt();
                        }
                    }
                    needToElectPrimary = false;
                    return bucketPrimary;
                } // almost done
            } // for
        } catch (CancelException e) {
            // Fix for 43544 - We don't need to elect a primary
            // if the cache was closed. The other members will
            // take care of it. This ensures we don't compromise
            // redundancy.
            needToElectPrimary = false;
            throw e;
        } catch (RegionDestroyedException e) {
            // Fix for 43544 - We don't need to elect a primary
            // if the region was destroyed. The other members will
            // take care of it. This ensures we don't compromise
            // redundancy.
            needToElectPrimary = false;
            throw e;
        } catch (PartitionOfflineException e) {
            throw e;
        } catch (RuntimeException e) {
            if (isDebugEnabled) {
                logger.debug("Unable to create new bucket {}: {}", bucketId, e.getMessage(), e);
            }
            // than reattempting on other nodes?
            if (!finishIncompleteCreation) {
                cleanUpBucket(bucketId);
            }
            throw e;
        } finally {
            if (observer != null) {
                observer.stopMonitoring();
            }
            // Try to make sure everyone that created the bucket can volunteer for primary
            if (needToElectPrimary) {
                try {
                    endBucketCreation(bucketId, prRegion.getRegionAdvisor().getBucketOwners(bucketId), bucketPrimary, partitionName);
                } catch (Exception e) {
                    // if region is going down, then no warning level logs
                    if (e instanceof CancelException || e instanceof CacheClosedException || (prRegion.getCancelCriterion().isCancelInProgress())) {
                        logger.debug("Exception trying choose a primary after bucket creation failure", e);
                    } else {
                        logger.warn("Exception trying choose a primary after bucket creation failure", e);
                    }
                }
            }
        }
    } // synchronized(this)
}
Also used : RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) CacheClosedException(org.apache.geode.cache.CacheClosedException) PartitionedRegionStorageException(org.apache.geode.cache.PartitionedRegionStorageException) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) CacheClosedException(org.apache.geode.cache.CacheClosedException) CancelException(org.apache.geode.CancelException) PartitionOfflineException(org.apache.geode.cache.persistence.PartitionOfflineException) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) PartitionOfflineException(org.apache.geode.cache.persistence.PartitionOfflineException) CancelException(org.apache.geode.CancelException) LonerDistributionManager(org.apache.geode.distributed.internal.LonerDistributionManager)
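
Two idioms in createBucketAtomically are easy to miss in the detail: the shutdown-all guard that throws CacheClosedException both before and inside the retry loop, and the deadline-driven loop that recomputes the remaining time on every pass until redundancy is satisfied or the operation times out. The following is a hedged sketch of just that loop skeleton; DeadlineRetry and the BooleanSupplier step are illustrative stand-ins, not Geode classes.

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

public final class DeadlineRetry {

    private DeadlineRetry() {
    }

    /**
     * Repeats the step until it reports success or the deadline passes, mirroring the
     * timeOut / timeLeft bookkeeping in createBucketAtomically.
     */
    public static void runUntilDeadline(BooleanSupplier step, long timeoutMillis) throws TimeoutException {
        final long deadline = System.currentTimeMillis() + timeoutMillis;
        for (;;) {
            long timeLeft = deadline - System.currentTimeMillis();
            if (timeLeft < 0) {
                // It took too long; give up, much as timedOut(...) does in the real method.
                throw new TimeoutException("gave up after " + timeoutMillis + " ms");
            }
            if (step.getAsBoolean()) {
                return; // goal reached (e.g. redundancy satisfied); stop retrying
            }
        }
    }
}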

Example 88 with CacheClosedException

Use of org.apache.geode.cache.CacheClosedException in project geode by apache.

The class PartitionedRegion, method postPutAllSend.

/**
   * Create PutAllPRMsgs for each bucket, and send them.
   * 
   * @param putAllOp DistributedPutAllOperation object.
   * @param successfulPuts not used in PartitionedRegion.
   */
@Override
public long postPutAllSend(DistributedPutAllOperation putAllOp, VersionedObjectList successfulPuts) {
    final boolean isDebugEnabled = logger.isDebugEnabled();
    if (cache.isCacheAtShutdownAll()) {
        throw new CacheClosedException("Cache is shutting down");
    }
    final long startTime = PartitionedRegionStats.startTime();
    // build all the msgs by bucketid
    HashMap prMsgMap = putAllOp.createPRMessages();
    PutAllPartialResult partialKeys = new PutAllPartialResult(putAllOp.putAllDataSize);
    // clear the successfulPuts list since we're actually doing the puts here
    // and the basicPutAll work was just a way to build the DPAO object
    Map<Object, VersionTag> keyToVersionMap = new HashMap<Object, VersionTag>(successfulPuts.size());
    successfulPuts.clearVersions();
    Iterator itor = prMsgMap.entrySet().iterator();
    while (itor.hasNext()) {
        Map.Entry mapEntry = (Map.Entry) itor.next();
        Integer bucketId = (Integer) mapEntry.getKey();
        PutAllPRMessage prMsg = (PutAllPRMessage) mapEntry.getValue();
        checkReadiness();
        long then = 0;
        if (isDebugEnabled) {
            then = System.currentTimeMillis();
        }
        try {
            VersionedObjectList versions = sendMsgByBucket(bucketId, prMsg);
            if (versions.size() > 0) {
                partialKeys.addKeysAndVersions(versions);
                versions.saveVersions(keyToVersionMap);
            } else if (!this.concurrencyChecksEnabled) {
                // no keys returned if not versioned
                Set keys = prMsg.getKeys();
                partialKeys.addKeys(keys);
            }
        } catch (PutAllPartialResultException pre) {
            // sendMsgByBucket applied partial keys
            if (isDebugEnabled) {
                logger.debug("PR.postPutAll encountered PutAllPartialResultException, ", pre);
            }
            partialKeys.consolidate(pre.getResult());
        } catch (Exception ex) {
            // If failed at other exception
            if (isDebugEnabled) {
                logger.debug("PR.postPutAll encountered exception at sendMsgByBucket, ", ex);
            }
            @Released EntryEventImpl firstEvent = prMsg.getFirstEvent(this);
            try {
                partialKeys.saveFailedKey(firstEvent.getKey(), ex);
            } finally {
                firstEvent.release();
            }
        }
        if (isDebugEnabled) {
            long now = System.currentTimeMillis();
            if ((now - then) >= 10000) {
                logger.debug("PR.sendMsgByBucket took " + (now - then) + " ms");
            }
        }
    }
    this.prStats.endPutAll(startTime);
    if (!keyToVersionMap.isEmpty()) {
        for (Iterator it = successfulPuts.getKeys().iterator(); it.hasNext(); ) {
            successfulPuts.addVersion(keyToVersionMap.get(it.next()));
        }
        keyToVersionMap.clear();
    }
    if (partialKeys.hasFailure()) {
        logger.info(LocalizedMessage.create(LocalizedStrings.Region_PutAll_Applied_PartialKeys_0_1, new Object[] { getFullPath(), partialKeys }));
        if (putAllOp.isBridgeOperation()) {
            if (partialKeys.getFailure() instanceof CancelException) {
                throw (CancelException) partialKeys.getFailure();
            } else {
                throw new PutAllPartialResultException(partialKeys);
            }
        } else {
            if (partialKeys.getFailure() instanceof RuntimeException) {
                throw (RuntimeException) partialKeys.getFailure();
            } else {
                throw new RuntimeException(partialKeys.getFailure());
            }
        }
    }
    return -1;
}
Also used : HashSet(java.util.HashSet) Set(java.util.Set) ResultsSet(org.apache.geode.cache.query.internal.ResultsSet) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) VersionedObjectList(org.apache.geode.internal.cache.tier.sockets.VersionedObjectList) PutAllPartialResult(org.apache.geode.internal.cache.PutAllPartialResultException.PutAllPartialResult) CacheClosedException(org.apache.geode.cache.CacheClosedException) TimeoutException(org.apache.geode.cache.TimeoutException) IndexCreationException(org.apache.geode.cache.query.IndexCreationException) NameResolutionException(org.apache.geode.cache.query.NameResolutionException) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) InternalGemFireException(org.apache.geode.InternalGemFireException) QueryInvocationTargetException(org.apache.geode.cache.query.QueryInvocationTargetException) TransactionDataRebalancedException(org.apache.geode.cache.TransactionDataRebalancedException) LockServiceDestroyedException(org.apache.geode.distributed.LockServiceDestroyedException) GatewaySenderException(org.apache.geode.internal.cache.wan.GatewaySenderException) PartitionOfflineException(org.apache.geode.cache.persistence.PartitionOfflineException) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) GatewaySenderConfigurationException(org.apache.geode.internal.cache.wan.GatewaySenderConfigurationException) ExecutionException(java.util.concurrent.ExecutionException) ReplyException(org.apache.geode.distributed.internal.ReplyException) IndexNameConflictException(org.apache.geode.cache.query.IndexNameConflictException) TypeMismatchException(org.apache.geode.cache.query.TypeMismatchException) IndexExistsException(org.apache.geode.cache.query.IndexExistsException) FunctionDomainException(org.apache.geode.cache.query.FunctionDomainException) EntryExistsException(org.apache.geode.cache.EntryExistsException) PartitionedRegionDistributionException(org.apache.geode.cache.PartitionedRegionDistributionException) PartitionedRegionStorageException(org.apache.geode.cache.PartitionedRegionStorageException) FunctionException(org.apache.geode.cache.execute.FunctionException) CacheLoaderException(org.apache.geode.cache.CacheLoaderException) NoSuchElementException(java.util.NoSuchElementException) QueryException(org.apache.geode.cache.query.QueryException) PartitionNotAvailableException(org.apache.geode.cache.partition.PartitionNotAvailableException) LowMemoryException(org.apache.geode.cache.LowMemoryException) InternalFunctionInvocationTargetException(org.apache.geode.internal.cache.execute.InternalFunctionInvocationTargetException) IndexInvalidException(org.apache.geode.cache.query.IndexInvalidException) PRLocallyDestroyedException(org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException) RegionExistsException(org.apache.geode.cache.RegionExistsException) CancelException(org.apache.geode.CancelException) DiskAccessException(org.apache.geode.cache.DiskAccessException) CacheWriterException(org.apache.geode.cache.CacheWriterException) TransactionException(org.apache.geode.cache.TransactionException) CacheClosedException(org.apache.geode.cache.CacheClosedException) ConcurrentCacheModificationException(org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException) MultiIndexCreationException(org.apache.geode.cache.query.MultiIndexCreationException) 
TransactionDataNotColocatedException(org.apache.geode.cache.TransactionDataNotColocatedException) EmptyRegionFunctionException(org.apache.geode.cache.execute.EmptyRegionFunctionException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) PutAllPRMessage(org.apache.geode.internal.cache.partitioned.PutAllPRMessage) VersionTag(org.apache.geode.internal.cache.versions.VersionTag) PREntriesIterator(org.apache.geode.internal.cache.partitioned.PREntriesIterator) Iterator(java.util.Iterator) CancelException(org.apache.geode.CancelException) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) HashMap(java.util.HashMap)
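
postPutAllSend follows a partial-result pattern: each per-bucket message is sent independently, successes are folded into partialKeys, a failing chunk is recorded against its first key, and only after the loop does the method decide what to rethrow. A stripped-down sketch of that accumulation loop is shown below; PartialBatch, the chunk map, and the applyChunk callback are hypothetical names rather than Geode types.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;

public final class PartialBatch {

    private PartialBatch() {
    }

    /**
     * Applies every chunk, remembering which keys failed and the first exception seen,
     * instead of aborting on the first failure, as postPutAllSend does per bucket.
     */
    public static <K, V> void applyAll(Map<K, V> chunks, Consumer<V> applyChunk) {
        List<K> failedKeys = new ArrayList<>();
        RuntimeException firstFailure = null;
        for (Map.Entry<K, V> entry : chunks.entrySet()) {
            try {
                applyChunk.accept(entry.getValue());
            } catch (RuntimeException e) {
                failedKeys.add(entry.getKey());
                if (firstFailure == null) {
                    firstFailure = e; // keep applying the remaining chunks
                }
            }
        }
        if (firstFailure != null) {
            // Report every failed key, then surface the first root cause to the caller.
            throw new RuntimeException("failed keys: " + failedKeys, firstFailure);
        }
    }
}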

Example 89 with CacheClosedException

Use of org.apache.geode.cache.CacheClosedException in project geode by apache.

The class DataCommands, method exportData.

@CliCommand(value = CliStrings.EXPORT_DATA, help = CliStrings.EXPORT_DATA__HELP)
@CliMetaData(relatedTopic = { CliStrings.TOPIC_GEODE_DATA, CliStrings.TOPIC_GEODE_REGION })
public Result exportData(@CliOption(key = CliStrings.EXPORT_DATA__REGION, mandatory = true, optionContext = ConverterHint.REGION_PATH, help = CliStrings.EXPORT_DATA__REGION__HELP) String regionName, @CliOption(key = CliStrings.EXPORT_DATA__FILE, unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE, mandatory = true, help = CliStrings.EXPORT_DATA__FILE__HELP) String filePath, @CliOption(key = CliStrings.EXPORT_DATA__MEMBER, unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE, optionContext = ConverterHint.MEMBERIDNAME, mandatory = true, help = CliStrings.EXPORT_DATA__MEMBER__HELP) String memberNameOrId) {
    this.securityService.authorizeRegionRead(regionName);
    final DistributedMember targetMember = CliUtil.getDistributedMemberByNameOrId(memberNameOrId);
    Result result;
    if (!filePath.endsWith(CliStrings.GEODE_DATA_FILE_EXTENSION)) {
        return ResultBuilder.createUserErrorResult(CliStrings.format(CliStrings.INVALID_FILE_EXTENSION, CliStrings.GEODE_DATA_FILE_EXTENSION));
    }
    try {
        if (targetMember != null) {
            final String[] args = { regionName, filePath };
            ResultCollector<?, ?> rc = CliUtil.executeFunction(exportDataFunction, args, targetMember);
            List<Object> results = (List<Object>) rc.getResult();
            if (results != null) {
                Object resultObj = results.get(0);
                if (resultObj instanceof String) {
                    result = ResultBuilder.createInfoResult((String) resultObj);
                } else if (resultObj instanceof Exception) {
                    result = ResultBuilder.createGemFireErrorResult(((Exception) resultObj).getMessage());
                } else {
                    result = ResultBuilder.createGemFireErrorResult(CliStrings.format(CliStrings.COMMAND_FAILURE_MESSAGE, CliStrings.EXPORT_DATA));
                }
            } else {
                result = ResultBuilder.createGemFireErrorResult(CliStrings.format(CliStrings.COMMAND_FAILURE_MESSAGE, CliStrings.EXPORT_DATA));
            }
        } else {
            result = ResultBuilder.createUserErrorResult(CliStrings.format(CliStrings.EXPORT_DATA__MEMBER__NOT__FOUND, memberNameOrId));
        }
    } catch (CacheClosedException e) {
        result = ResultBuilder.createGemFireErrorResult(e.getMessage());
    } catch (FunctionInvocationTargetException e) {
        result = ResultBuilder.createGemFireErrorResult(CliStrings.format(CliStrings.COMMAND_FAILURE_MESSAGE, CliStrings.EXPORT_DATA));
    }
    return result;
}
Also used : DistributedMember(org.apache.geode.distributed.DistributedMember) FunctionInvocationTargetException(org.apache.geode.cache.execute.FunctionInvocationTargetException) List(java.util.List) ArrayList(java.util.ArrayList) CacheClosedException(org.apache.geode.cache.CacheClosedException) TimeoutException(java.util.concurrent.TimeoutException) FunctionInvocationTargetException(org.apache.geode.cache.execute.FunctionInvocationTargetException) CacheClosedException(org.apache.geode.cache.CacheClosedException) Result(org.apache.geode.management.cli.Result) DataCommandResult(org.apache.geode.management.internal.cli.domain.DataCommandResult) CliCommand(org.springframework.shell.core.annotation.CliCommand) CliMetaData(org.apache.geode.management.cli.CliMetaData)
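
Note how exportData never lets a CacheClosedException or FunctionInvocationTargetException reach the shell; both are translated into error results for the user. The sketch below isolates that translation, using a plain String in place of the Result type so it stays self-contained; CommandErrorTranslation and runOrDescribeError are illustrative names, not part of the gfsh command framework.

import java.util.function.Supplier;

import org.apache.geode.cache.CacheClosedException;
import org.apache.geode.cache.execute.FunctionInvocationTargetException;

public final class CommandErrorTranslation {

    private CommandErrorTranslation() {
    }

    /** Runs the command body, turning expected cache failures into user-facing error text. */
    public static String runOrDescribeError(Supplier<String> commandBody, String commandName) {
        try {
            return commandBody.get();
        } catch (CacheClosedException e) {
            // The cache went away; report it instead of surfacing a stack trace.
            return "ERROR: cache is closed: " + e.getMessage();
        } catch (FunctionInvocationTargetException e) {
            // The function could not be delivered to the target member.
            return "ERROR: " + commandName + " could not reach the target member: " + e.getMessage();
        }
    }
}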

Example 90 with CacheClosedException

Use of org.apache.geode.cache.CacheClosedException in project geode by apache.

The class DistributedRegionFunction, method execute.

@Override
public void execute(FunctionContext context) {
    RegionFunctionContext rcontext = (RegionFunctionContext) context;
    Region<Object, Object> region = rcontext.getDataSet();
    InternalDistributedSystem sys = InternalDistributedSystem.getConnectedInstance();
    sys.getLogWriter().fine("DistributedRegionFunction#execute( " + rcontext + " )");
    Assert.assertTrue(region.getAttributes().getDataPolicy().withStorage());
    Assert.assertTrue(region.getAttributes().getDataPolicy() != DataPolicy.NORMAL);
    Assert.assertTrue(rcontext.getFilter().size() == 20);
    long startTime = System.currentTimeMillis();
    // the body itself
    if (Boolean.TRUE.equals(rcontext.getArguments())) {
        // do not close cache in retry
        if (!rcontext.isPossibleDuplicate()) {
            sys.disconnect();
            throw new CacheClosedException("Throwing CacheClosedException " + "to simulate failover during function exception");
        }
    } else {
        WaitCriterion wc = new WaitCriterion() {

            String excuse;

            public boolean done() {
                return false;
            }

            public String description() {
                return excuse;
            }
        };
        Wait.waitForCriterion(wc, 12000, 500, false);
    }
    long endTime = System.currentTimeMillis();
    // intentionally doing region operation to cause cacheClosedException
    region.put("execKey-201", new Integer(201));
    if (rcontext.isPossibleDuplicate()) {
        // Below operation is done when the
        // function is reexecuted
        region.put("execKey-202", new Integer(202));
        region.put("execKey-203", new Integer(203));
    }
    sys.getLogWriter().fine("Time wait for Function Execution = " + (endTime - startTime));
    for (int i = 0; i < 5000; i++) {
        context.getResultSender().sendResult(Boolean.TRUE);
    }
    context.getResultSender().lastResult(Boolean.TRUE);
}
Also used : WaitCriterion(org.apache.geode.test.dunit.WaitCriterion) RegionFunctionContext(org.apache.geode.cache.execute.RegionFunctionContext) InternalDistributedSystem(org.apache.geode.distributed.internal.InternalDistributedSystem) CacheClosedException(org.apache.geode.cache.CacheClosedException)
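
This test function drives failover on purpose: on the first execution it disconnects the distributed system and throws CacheClosedException, and on re-execution (detected through isPossibleDuplicate()) it performs the extra puts and streams results back. A minimal sketch of the same fail-once-then-succeed shape follows; FailOnceFunction is an illustrative helper, not one of Geode's DUnit test functions.

import org.apache.geode.cache.CacheClosedException;
import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionContext;
import org.apache.geode.cache.execute.RegionFunctionContext;

public class FailOnceFunction implements Function {

    @Override
    public void execute(FunctionContext context) {
        RegionFunctionContext rcontext = (RegionFunctionContext) context;
        if (!rcontext.isPossibleDuplicate()) {
            // First attempt: simulate the hosting member failing so the caller re-routes.
            throw new CacheClosedException("simulated failover during function execution");
        }
        // Retry path: the function service flagged this execution as a possible duplicate.
        context.getResultSender().lastResult(Boolean.TRUE);
    }

    @Override
    public String getId() {
        return FailOnceFunction.class.getName();
    }

    @Override
    public boolean hasResult() {
        return true;
    }

    @Override
    public boolean isHA() {
        return true; // allow the function service to retry on another member
    }

    @Override
    public boolean optimizeForWrite() {
        return false;
    }
}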

Aggregations

CacheClosedException (org.apache.geode.cache.CacheClosedException): 95
Cache (org.apache.geode.cache.Cache): 26
Test (org.junit.Test): 21
IOException (java.io.IOException): 20
ArrayList (java.util.ArrayList): 20
FunctionException (org.apache.geode.cache.execute.FunctionException): 20
FunctionInvocationTargetException (org.apache.geode.cache.execute.FunctionInvocationTargetException): 20
CancelException (org.apache.geode.CancelException): 18
Region (org.apache.geode.cache.Region): 18
Host (org.apache.geode.test.dunit.Host): 17
VM (org.apache.geode.test.dunit.VM): 17
InternalCache (org.apache.geode.internal.cache.InternalCache): 16
IgnoredException (org.apache.geode.test.dunit.IgnoredException): 16
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 16
DistributedMember (org.apache.geode.distributed.DistributedMember): 14
ReplyException (org.apache.geode.distributed.internal.ReplyException): 14
RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException): 12
Execution (org.apache.geode.cache.execute.Execution): 11
SerializableCallable (org.apache.geode.test.dunit.SerializableCallable): 11
HashMap (java.util.HashMap): 10