
Example 61 with CacheClosedException

Use of org.apache.geode.cache.CacheClosedException in project geode by apache.

In the class SingleHopClientExecutor, method submitGetAll.

static Map<ServerLocation, Object> submitGetAll(Map<ServerLocation, HashSet> serverToFilterMap, List callableTasks, ClientMetadataService cms, LocalRegion region) {
    if (callableTasks != null && !callableTasks.isEmpty()) {
        Map<ServerLocation, Object> resultMap = new HashMap<ServerLocation, Object>();
        List futures = null;
        try {
            futures = execService.invokeAll(callableTasks);
        } catch (RejectedExecutionException rejectedExecutionEx) {
            throw rejectedExecutionEx;
        } catch (InterruptedException e) {
            throw new InternalGemFireException(e.getMessage());
        }
        if (futures != null) {
            Iterator futureItr = futures.iterator();
            Iterator taskItr = callableTasks.iterator();
            while (futureItr.hasNext() && !execService.isShutdown() && !execService.isTerminated()) {
                Future fut = (Future) futureItr.next();
                SingleHopOperationCallable task = (SingleHopOperationCallable) taskItr.next();
                List keys = ((GetAllOpImpl) task.getOperation()).getKeyList();
                ServerLocation server = task.getServer();
                try {
                    VersionedObjectList valuesFromServer = (VersionedObjectList) fut.get();
                    valuesFromServer.setKeys(keys);
                    for (VersionedObjectList.Iterator it = valuesFromServer.iterator(); it.hasNext(); ) {
                        VersionedObjectList.Entry entry = it.next();
                        Object key = entry.getKey();
                        Object value = entry.getValue();
                        if (!entry.isKeyNotOnServer()) {
                            if (value instanceof Throwable) {
                                logger.warn(LocalizedMessage.create(LocalizedStrings.GetAll_0_CAUGHT_THE_FOLLOWING_EXCEPTION_ATTEMPTING_TO_GET_VALUE_FOR_KEY_1, new Object[] { value, key }), (Throwable) value);
                            }
                        }
                    }
                    if (logger.isDebugEnabled()) {
                        logger.debug("GetAllOp#got result from {}: {}", server, valuesFromServer);
                    }
                    resultMap.put(server, valuesFromServer);
                } catch (InterruptedException e) {
                    throw new InternalGemFireException(e.getMessage());
                } catch (ExecutionException ee) {
                    if (ee.getCause() instanceof ServerOperationException) {
                        if (logger.isDebugEnabled()) {
                            logger.debug("GetAllOp#ExecutionException.ServerOperationException : Caused by :{}", ee.getCause());
                        }
                        throw (ServerOperationException) ee.getCause();
                    } else if (ee.getCause() instanceof ServerConnectivityException) {
                        if (logger.isDebugEnabled()) {
                            logger.debug("GetAllOp#ExecutionException.ServerConnectivityException : Caused by :{} The failed server is: {}", ee.getCause(), server);
                        }
                        try {
                            cms = region.getCache().getClientMetadataService();
                        } catch (CacheClosedException e) {
                            return null;
                        }
                        cms.removeBucketServerLocation(server);
                        cms.scheduleGetPRMetaData((LocalRegion) region, false);
                        resultMap.put(server, ee.getCause());
                    } else {
                        throw executionThrowable(ee.getCause());
                    }
                }
            }
            return resultMap;
        }
    }
    return null;
}
Also used : HashMap (java.util.HashMap), ServerLocation (org.apache.geode.distributed.internal.ServerLocation), InternalGemFireException (org.apache.geode.InternalGemFireException), VersionedObjectList (org.apache.geode.internal.cache.tier.sockets.VersionedObjectList), CacheClosedException (org.apache.geode.cache.CacheClosedException), RejectedExecutionException (java.util.concurrent.RejectedExecutionException), GetAllOpImpl (org.apache.geode.cache.client.internal.GetAllOp.GetAllOpImpl), ServerConnectivityException (org.apache.geode.cache.client.ServerConnectivityException), Iterator (java.util.Iterator), Future (java.util.concurrent.Future), List (java.util.List), ServerOperationException (org.apache.geode.cache.client.ServerOperationException), ExecutionException (java.util.concurrent.ExecutionException)
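
A note on the cache-closed path above: when region.getCache() throws CacheClosedException, submitGetAll returns null rather than propagating the exception, so callers must treat a null result as "the cache closed mid-operation". A minimal caller-side sketch of that convention (the variable names are illustrative, not from the Geode source):

Map<ServerLocation, Object> results = SingleHopClientExecutor.submitGetAll(serverToFilterMap, tasks, cms, region);
if (results == null) {
    // Cache closed while the getAll was in flight: abandon the single-hop
    // attempt and let the caller's own cancellation checks take over.
    return;
}
// Otherwise each map value is either the VersionedObjectList returned by
// that server or the ServerConnectivityException recorded for a failed one.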

Example 62 with CacheClosedException

Use of org.apache.geode.cache.CacheClosedException in project geode by apache.

In the class PartitionedTXRegionStub, method postPutAll.

/**
   * Create PutAllPRMsgs for each bucket, and send them.
   * 
   * @param putallO DistributedPutAllOperation object.
   */
public void postPutAll(DistributedPutAllOperation putallO, VersionedObjectList successfulPuts, LocalRegion r) throws TransactionException {
    if (r.getCache().isCacheAtShutdownAll()) {
        throw new CacheClosedException("Cache is shutting down");
    }
    PartitionedRegion pr = (PartitionedRegion) r;
    final long startTime = PartitionedRegionStats.startTime();
    // build all the msgs by bucketid
    HashMap prMsgMap = putallO.createPRMessages();
    PutAllPartialResult partialKeys = new PutAllPartialResult(putallO.putAllDataSize);
    // this is rebuilt by this method
    successfulPuts.clear();
    Iterator itor = prMsgMap.entrySet().iterator();
    while (itor.hasNext()) {
        Map.Entry mapEntry = (Map.Entry) itor.next();
        Integer bucketId = (Integer) mapEntry.getKey();
        PutAllPRMessage prMsg = (PutAllPRMessage) mapEntry.getValue();
        pr.checkReadiness();
        try {
            VersionedObjectList versions = sendMsgByBucket(bucketId, prMsg, pr);
            // prMsg.saveKeySet(partialKeys);
            partialKeys.addKeysAndVersions(versions);
            successfulPuts.addAll(versions);
        } catch (PutAllPartialResultException pre) {
            // sendMsgByBucket applied partial keys
            partialKeys.consolidate(pre.getResult());
        } catch (Exception ex) {
            // Any other exception: record the first event's key as failed
            @Released EntryEventImpl firstEvent = prMsg.getFirstEvent(pr);
            try {
                partialKeys.saveFailedKey(firstEvent.getKey(), ex);
            } finally {
                firstEvent.release();
            }
        }
    }
    pr.prStats.endPutAll(startTime);
    if (partialKeys.hasFailure()) {
        pr.getCache().getLoggerI18n().info(LocalizedStrings.Region_PutAll_Applied_PartialKeys_0_1, new Object[] { pr.getFullPath(), partialKeys });
        if (putallO.isBridgeOperation()) {
            if (partialKeys.getFailure() instanceof CancelException) {
                throw (CancelException) partialKeys.getFailure();
            } else {
                throw new PutAllPartialResultException(partialKeys);
            }
        } else {
            if (partialKeys.getFailure() instanceof RuntimeException) {
                throw (RuntimeException) partialKeys.getFailure();
            } else {
                throw new RuntimeException(partialKeys.getFailure());
            }
        }
    }
}
Also used : HashMap (java.util.HashMap), EntryEventImpl (org.apache.geode.internal.cache.EntryEventImpl), VersionedObjectList (org.apache.geode.internal.cache.tier.sockets.VersionedObjectList), PutAllPartialResult (org.apache.geode.internal.cache.PutAllPartialResultException.PutAllPartialResult), CacheClosedException (org.apache.geode.cache.CacheClosedException), PrimaryBucketException (org.apache.geode.internal.cache.PrimaryBucketException), ForceReattemptException (org.apache.geode.internal.cache.ForceReattemptException), TransactionDataRebalancedException (org.apache.geode.cache.TransactionDataRebalancedException), TransactionException (org.apache.geode.cache.TransactionException), EntryNotFoundException (org.apache.geode.cache.EntryNotFoundException), TransactionDataNodeHasDepartedException (org.apache.geode.cache.TransactionDataNodeHasDepartedException), DataLocationException (org.apache.geode.internal.cache.DataLocationException), CancelException (org.apache.geode.CancelException), PutAllPartialResultException (org.apache.geode.internal.cache.PutAllPartialResultException), TransactionDataNotColocatedException (org.apache.geode.cache.TransactionDataNotColocatedException), BucketNotFoundException (org.apache.geode.internal.cache.BucketNotFoundException), Entry (org.apache.geode.cache.Region.Entry), PutAllPRMessage (org.apache.geode.internal.cache.partitioned.PutAllPRMessage), PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion), Iterator (java.util.Iterator), Map (java.util.Map)
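
For context, postPutAll surfaces three distinct failure shapes to its caller: a CacheClosedException when shutdown-all is in progress, the original CancelException rethrown unwrapped for bridge (client-initiated) operations, and a PutAllPartialResultException when only some bucket messages failed. A hypothetical caller sketch, assuming a PartitionedTXRegionStub named stub (names and the helper are illustrative):

try {
    stub.postPutAll(putAllOp, successfulPuts, region);
} catch (CacheClosedException e) {
    // Shutdown-all is underway; the entry guard fired before anything
    // was applied, so do not retry here.
    throw e;
} catch (PutAllPartialResultException e) {
    // Some buckets succeeded: successfulPuts holds the applied keys and
    // versions, while e.getResult() describes the failed ones.
    recoverFromPartialPutAll(e.getResult()); // hypothetical helper
}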

Example 63 with CacheClosedException

Use of org.apache.geode.cache.CacheClosedException in project geode by apache.

In the class PartitionedTXRegionStub, method postRemoveAll.

@Override
public void postRemoveAll(DistributedRemoveAllOperation op, VersionedObjectList successfulOps, LocalRegion r) {
    if (r.getCache().isCacheAtShutdownAll()) {
        throw new CacheClosedException("Cache is shutting down");
    }
    PartitionedRegion pr = (PartitionedRegion) r;
    final long startTime = PartitionedRegionStats.startTime();
    // build all the msgs by bucketid
    HashMap<Integer, RemoveAllPRMessage> prMsgMap = op.createPRMessages();
    PutAllPartialResult partialKeys = new PutAllPartialResult(op.removeAllDataSize);
    // this is rebuilt by this method
    successfulOps.clear();
    Iterator<Map.Entry<Integer, RemoveAllPRMessage>> itor = prMsgMap.entrySet().iterator();
    while (itor.hasNext()) {
        Map.Entry<Integer, RemoveAllPRMessage> mapEntry = itor.next();
        Integer bucketId = mapEntry.getKey();
        RemoveAllPRMessage prMsg = mapEntry.getValue();
        pr.checkReadiness();
        try {
            VersionedObjectList versions = sendMsgByBucket(bucketId, prMsg, pr);
            // prMsg.saveKeySet(partialKeys);
            partialKeys.addKeysAndVersions(versions);
            successfulOps.addAll(versions);
        } catch (PutAllPartialResultException pre) {
            // sendMsgByBucket applied partial keys
            partialKeys.consolidate(pre.getResult());
        } catch (Exception ex) {
            // Any other exception: record the first event's key as failed
            @Released EntryEventImpl firstEvent = prMsg.getFirstEvent(pr);
            try {
                partialKeys.saveFailedKey(firstEvent.getKey(), ex);
            } finally {
                firstEvent.release();
            }
        }
    }
    pr.prStats.endRemoveAll(startTime);
    if (partialKeys.hasFailure()) {
        pr.getCache().getLoggerI18n().info(LocalizedStrings.Region_RemoveAll_Applied_PartialKeys_0_1, new Object[] { pr.getFullPath(), partialKeys });
        if (op.isBridgeOperation()) {
            if (partialKeys.getFailure() instanceof CancelException) {
                throw (CancelException) partialKeys.getFailure();
            } else {
                throw new PutAllPartialResultException(partialKeys);
            }
        } else {
            if (partialKeys.getFailure() instanceof RuntimeException) {
                throw (RuntimeException) partialKeys.getFailure();
            } else {
                throw new RuntimeException(partialKeys.getFailure());
            }
        }
    }
}
Also used : EntryEventImpl (org.apache.geode.internal.cache.EntryEventImpl), VersionedObjectList (org.apache.geode.internal.cache.tier.sockets.VersionedObjectList), RemoveAllPRMessage (org.apache.geode.internal.cache.partitioned.RemoveAllPRMessage), PutAllPartialResult (org.apache.geode.internal.cache.PutAllPartialResultException.PutAllPartialResult), CacheClosedException (org.apache.geode.cache.CacheClosedException), PrimaryBucketException (org.apache.geode.internal.cache.PrimaryBucketException), ForceReattemptException (org.apache.geode.internal.cache.ForceReattemptException), TransactionDataRebalancedException (org.apache.geode.cache.TransactionDataRebalancedException), TransactionException (org.apache.geode.cache.TransactionException), EntryNotFoundException (org.apache.geode.cache.EntryNotFoundException), TransactionDataNodeHasDepartedException (org.apache.geode.cache.TransactionDataNodeHasDepartedException), DataLocationException (org.apache.geode.internal.cache.DataLocationException), CancelException (org.apache.geode.CancelException), PutAllPartialResultException (org.apache.geode.internal.cache.PutAllPartialResultException), TransactionDataNotColocatedException (org.apache.geode.cache.TransactionDataNotColocatedException), BucketNotFoundException (org.apache.geode.internal.cache.BucketNotFoundException), Entry (org.apache.geode.cache.Region.Entry), PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion), HashMap (java.util.HashMap), Map (java.util.Map)
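
The partialKeys.getFailure() instanceof CancelException branch above also covers cache closure, because CacheClosedException is a subclass of CancelException; for bridge operations a cache-close failure is therefore rethrown as-is rather than wrapped in a PutAllPartialResultException or RuntimeException. A one-line illustration:

// CacheClosedException extends CancelException, so the bridge-operation
// branch rethrows it unwrapped.
Throwable failure = new CacheClosedException("Cache is shutting down");
assert failure instanceof CancelException; // true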

Example 64 with CacheClosedException

Use of org.apache.geode.cache.CacheClosedException in project geode by apache.

In the class DeployedJar, method cleanUp.

/**
   * Unregisters all functions from this jar if it was undeployed (i.e. newVersion == null), or all
   * functions not present in the new version if it was redeployed.
   *
   * @param newVersion The new version of this jar that was deployed, or null if this jar was
   *        undeployed.
   */
protected synchronized void cleanUp(DeployedJar newVersion) {
    Stream<String> oldFunctions = this.registeredFunctions.stream().map(Function::getId);
    Stream<String> removedFunctions;
    if (newVersion == null) {
        removedFunctions = oldFunctions;
    } else {
        Predicate<String> isRemoved = (String oldFunctionId) -> !newVersion.hasFunctionWithId(oldFunctionId);
        removedFunctions = oldFunctions.filter(isRemoved);
    }
    removedFunctions.forEach(FunctionService::unregisterFunction);
    this.registeredFunctions.clear();
    try {
        TypeRegistry typeRegistry = ((InternalCache) CacheFactory.getAnyInstance()).getPdxRegistry();
        if (typeRegistry != null) {
            typeRegistry.flushCache();
        }
    } catch (CacheClosedException ignored) {
    // That's okay, it just means there was nothing to flush to begin with
    }
}
Also used : Function (org.apache.geode.cache.execute.Function), InternalCache (org.apache.geode.internal.cache.InternalCache), FunctionService (org.apache.geode.cache.execute.FunctionService), CacheClosedException (org.apache.geode.cache.CacheClosedException), TypeRegistry (org.apache.geode.pdx.internal.TypeRegistry)
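
The try/catch around CacheFactory.getAnyInstance() above is the standard idiom for cleanup code that may run after the cache is gone: getAnyInstance() throws CacheClosedException when no open cache exists, and the PDX flush is simply skipped. A self-contained sketch of the same pattern (CacheGuard and cacheIfOpen are illustrative names, not Geode API):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheClosedException;
import org.apache.geode.cache.CacheFactory;

public class CacheGuard {
    // Returns the current cache, or null if it is already closed, so that
    // shutdown-time cleanup can no-op instead of failing.
    static Cache cacheIfOpen() {
        try {
            return CacheFactory.getAnyInstance();
        } catch (CacheClosedException ignored) {
            // No open cache in this JVM: nothing to clean up.
            return null;
        }
    }
}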

Example 65 with CacheClosedException

Use of org.apache.geode.cache.CacheClosedException in project geode by apache.

In the class Oplog, method basicModify.

/**
   * A helper function which identifies whether to modify the entry in the current oplog or to make
   * the switch to the next oplog. This function enables us to reuse the byte buffer that was
   * created for an oplog that can no longer be written to. It will also take care of
   * compaction if required.
   * 
   * @param entry DiskEntry object representing the current Entry
   */
private void basicModify(DiskRegionView dr, DiskEntry entry, ValueWrapper value, byte userBits, boolean async, boolean calledByCompactor) throws IOException, InterruptedException {
    DiskId id = entry.getDiskId();
    boolean useNextOplog = false;
    long startPosForSynchOp = -1L;
    int adjustment = 0;
    Oplog emptyOplog = null;
    if (DiskStoreImpl.KRF_DEBUG) {
        // wait for cache close to create krf
        System.out.println("basicModify KRF_DEBUG");
        Thread.sleep(1000);
    }
    synchronized (this.lock) {
        // synchronized (this.crf) {
        if (getOplogSet().getChild() != this) {
            useNextOplog = true;
        } else {
            initOpState(OPLOG_MOD_ENTRY_1ID, dr, entry, value, userBits, false);
            adjustment = getOpStateSize();
            assert adjustment > 0;
            long temp = (this.crf.currSize + adjustment);
            if (temp > getMaxCrfSize() && !isFirstRecord()) {
                switchOpLog(dr, adjustment, entry);
                // we can't reuse it since it contains variable length data
                useNextOplog = true;
            } else {
                if (this.lockedForKRFcreate) {
                    CacheClosedException cce = new CacheClosedException("The disk store is closed.");
                    dr.getCancelCriterion().checkCancelInProgress(cce);
                    throw cce;
                }
                this.firstRecord = false;
                long oldOplogId;
                // do the io while holding lock so that switch can set doneAppending
                // Write the data to the opLog for the synch mode
                startPosForSynchOp = writeOpLogBytes(this.crf, async, true);
                this.crf.currSize = temp;
                startPosForSynchOp += getOpStateValueOffset();
                if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES)) {
                    VersionTag tag = null;
                    if (entry.getVersionStamp() != null) {
                        tag = entry.getVersionStamp().asVersionTag();
                    }
                    logger.trace(LogMarker.PERSIST_WRITES, "basicModify: id=<{}> key=<{}> valueOffset={} userBits={} valueLen={} valueBytes=<{}> drId={} versionStamp={} oplog#{}", abs(id.getKeyId()), entry.getKey(), startPosForSynchOp, userBits, value.getLength(), value.getBytesAsString(), dr.getId(), tag, getOplogId());
                }
                if (EntryBits.isNeedsValue(userBits)) {
                    id.setValueLength(value.getLength());
                } else {
                    id.setValueLength(0);
                }
                id.setUserBits(userBits);
                if (logger.isTraceEnabled()) {
                    logger.trace("Oplog::basicModify:Released ByteBuffer with data for Disk ID = {}", id);
                }
                synchronized (id) {
                    // Need to do this while synced on id
                    // now that we compact forward to most recent oplog.
                    // @todo darrel: The sync logic in the disk code is so complex
                    // I really doubt it is correct.
                    // I think we need to do a fresh rewrite of it.
                    oldOplogId = id.setOplogId(getOplogId());
                    if (EntryBits.isAnyInvalid(userBits) || EntryBits.isTombstone(userBits)) {
                        id.setOffsetInOplog(-1);
                    } else {
                        id.setOffsetInOplog(startPosForSynchOp);
                    }
                }
                // Set the oplog size change for stats
                this.dirHolder.incrementTotalOplogSize(adjustment);
                this.incTotalCount();
                EntryLogger.logPersistPut(dr.getName(), entry.getKey(), dr.getDiskStoreID());
                if (oldOplogId != getOplogId()) {
                    Oplog oldOplog = getOplogSet().getChild(oldOplogId);
                    if (oldOplog != null) {
                        oldOplog.rmLive(dr, entry);
                        emptyOplog = oldOplog;
                    }
                    addLive(dr, entry);
                // Note if this mod was done to oldOplog then this entry is already
                // in
                // the linked list. All we needed to do in this case is call
                // incTotalCount
                } else {
                    getOrCreateDRI(dr).update(entry);
                }
                // Update the region version vector for the disk store.
                // This needs to be done under lock so that we don't switch oplogs
                // until the version vector accurately represents what is in this oplog
                RegionVersionVector rvv = dr.getRegionVersionVector();
                if (rvv != null && entry.getVersionStamp() != null) {
                    rvv.recordVersion(entry.getVersionStamp().getMemberID(), entry.getVersionStamp().getRegionVersion());
                }
            }
            clearOpState();
        }
    // }
    }
    if (useNextOplog) {
        if (LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER) {
            CacheObserverHolder.getInstance().afterSwitchingOplog();
        }
        Assert.assertTrue(getOplogSet().getChild() != this);
        getOplogSet().getChild().basicModify(dr, entry, value, userBits, async, calledByCompactor);
    } else {
        if (LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER) {
            CacheObserverHolder.getInstance().afterSettingOplogOffSet(startPosForSynchOp);
        }
        if (emptyOplog != null && (!emptyOplog.isCompacting() || emptyOplog.calledByCompactorThread())) {
            if (calledByCompactor && emptyOplog.hasNoLiveValues()) {
                // Since compactor will only append to crf no need to flush drf.
                // Before we have the compactor delete an oplog it has emptied out
                // we want to have it flush anything it has written to the current
                // oplog.
                // Note that since sync writes may be done to the same oplog we are
                // doing async writes to, any sync writes will cause a flush to be done
                // immediately.
                flushAll(true);
            }
            emptyOplog.handleNoLiveValues();
        }
    }
}
Also used : VersionTag (org.apache.geode.internal.cache.versions.VersionTag), RegionVersionVector (org.apache.geode.internal.cache.versions.RegionVersionVector), CacheClosedException (org.apache.geode.cache.CacheClosedException)
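
The lockedForKRFcreate branch above uses a common Geode cancellation idiom: build the CacheClosedException first, pass it to the cancel criterion so that a more specific shutdown cause can be thrown in its place, and only throw the generic exception if no wider cancellation is in progress. Sketched in isolation (cancelCriterion stands for the CancelCriterion returned by dr.getCancelCriterion() above):

// checkCancelInProgress throws a more specific CancelException if the
// system is already shutting down; otherwise control returns and we
// throw the CacheClosedException we built.
CacheClosedException cce = new CacheClosedException("The disk store is closed.");
cancelCriterion.checkCancelInProgress(cce);
throw cce;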

Aggregations

CacheClosedException (org.apache.geode.cache.CacheClosedException): 95
Cache (org.apache.geode.cache.Cache): 26
Test (org.junit.Test): 21
IOException (java.io.IOException): 20
ArrayList (java.util.ArrayList): 20
FunctionException (org.apache.geode.cache.execute.FunctionException): 20
FunctionInvocationTargetException (org.apache.geode.cache.execute.FunctionInvocationTargetException): 20
CancelException (org.apache.geode.CancelException): 18
Region (org.apache.geode.cache.Region): 18
Host (org.apache.geode.test.dunit.Host): 17
VM (org.apache.geode.test.dunit.VM): 17
InternalCache (org.apache.geode.internal.cache.InternalCache): 16
IgnoredException (org.apache.geode.test.dunit.IgnoredException): 16
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 16
DistributedMember (org.apache.geode.distributed.DistributedMember): 14
ReplyException (org.apache.geode.distributed.internal.ReplyException): 14
RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException): 12
Execution (org.apache.geode.cache.execute.Execution): 11
SerializableCallable (org.apache.geode.test.dunit.SerializableCallable): 11
HashMap (java.util.HashMap): 10