Example 31 with TimeoutException

Use of org.apache.geode.cache.TimeoutException in the Apache Geode project.

The class PartitionedRegion, method virtualPut.

@Override
boolean virtualPut(EntryEventImpl event, boolean ifNew, boolean ifOld, Object expectedOldValue, boolean requireOldValue, long lastModified, boolean overwriteDestroyed) throws TimeoutException, CacheWriterException {
    final long startTime = PartitionedRegionStats.startTime();
    boolean result = false;
    final DistributedPutAllOperation putAllOp_save = event.setPutAllOperation(null);
    if (event.getEventId() == null) {
        event.setNewEventId(this.cache.getDistributedSystem());
    }
    boolean bucketStorageAssigned = true;
    try {
        final Integer bucketId = event.getKeyInfo().getBucketId();
        assert bucketId != KeyInfo.UNKNOWN_BUCKET;
        // check in bucket2Node region
        InternalDistributedMember targetNode = getNodeForBucketWrite(bucketId, null);
        // and to optimize distribution.
        if (logger.isDebugEnabled()) {
            logger.debug("PR.virtualPut putting event={}", event);
        }
        if (targetNode == null) {
            try {
                bucketStorageAssigned = false;
                targetNode = createBucket(bucketId, event.getNewValSizeForPR(), null);
            } catch (PartitionedRegionStorageException e) {
                // try not to throw a PRSE if the cache is closing or this region was
                // destroyed during createBucket() (bug 36574)
                this.checkReadiness();
                if (this.cache.isClosed()) {
                    throw new RegionDestroyedException(toString(), getFullPath());
                }
                throw e;
            }
        }
        if (event.isBridgeEvent() && bucketStorageAssigned) {
            setNetworkHopType(bucketId, targetNode);
        }
        if (putAllOp_save == null) {
            result = putInBucket(targetNode, bucketId, event, ifNew, ifOld, expectedOldValue, requireOldValue, (ifNew ? 0L : lastModified));
            if (logger.isDebugEnabled()) {
                logger.debug("PR.virtualPut event={} ifNew={} ifOld={} result={}", event, ifNew, ifOld, result);
            }
        } else {
            // fix for 40502
            checkIfAboveThreshold(event);
            // putAll: save the bucket id into DPAO, then wait for postPutAll to send msg
            // at this time, DPAO's PutAllEntryData should be empty, we should add entry here with
            // bucket id
            // the message will be packed in postPutAll, include the one to local bucket, because the
            // buckets
            // could be changed at that time
            putAllOp_save.addEntry(event, bucketId);
            if (logger.isDebugEnabled()) {
                logger.debug("PR.virtualPut PutAll added event={} into bucket {}", event, bucketId);
            }
            result = true;
        }
    } catch (RegionDestroyedException rde) {
        if (!rde.getRegionFullPath().equals(getFullPath())) {
            throw new RegionDestroyedException(toString(), getFullPath(), rde);
        }
    } finally {
        if (putAllOp_save == null) {
            // only for normal put
            if (ifNew) {
                this.prStats.endCreate(startTime);
            } else {
                this.prStats.endPut(startTime);
            }
        }
    }
    if (!result) {
        checkReadiness();
        if (!ifNew && !ifOld && !this.concurrencyChecksEnabled) {
            // may fail due to concurrency conflict
            // failed for unknown reason
            // throw new PartitionedRegionStorageException("unable to execute operation");
            logger.warn(LocalizedMessage.create(LocalizedStrings.PartitionedRegion_PRVIRTUALPUT_RETURNING_FALSE_WHEN_IFNEW_AND_IFOLD_ARE_BOTH_FALSE), new Exception(LocalizedStrings.PartitionedRegion_STACK_TRACE.toLocalizedString()));
        }
    }
    return result;
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) PartitionedRegionStorageException(org.apache.geode.cache.PartitionedRegionStorageException) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) TimeoutException(org.apache.geode.cache.TimeoutException) IndexCreationException(org.apache.geode.cache.query.IndexCreationException) NameResolutionException(org.apache.geode.cache.query.NameResolutionException) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) InternalGemFireException(org.apache.geode.InternalGemFireException) QueryInvocationTargetException(org.apache.geode.cache.query.QueryInvocationTargetException) TransactionDataRebalancedException(org.apache.geode.cache.TransactionDataRebalancedException) LockServiceDestroyedException(org.apache.geode.distributed.LockServiceDestroyedException) GatewaySenderException(org.apache.geode.internal.cache.wan.GatewaySenderException) PartitionOfflineException(org.apache.geode.cache.persistence.PartitionOfflineException) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) GatewaySenderConfigurationException(org.apache.geode.internal.cache.wan.GatewaySenderConfigurationException) ExecutionException(java.util.concurrent.ExecutionException) ReplyException(org.apache.geode.distributed.internal.ReplyException) IndexNameConflictException(org.apache.geode.cache.query.IndexNameConflictException) TypeMismatchException(org.apache.geode.cache.query.TypeMismatchException) IndexExistsException(org.apache.geode.cache.query.IndexExistsException) FunctionDomainException(org.apache.geode.cache.query.FunctionDomainException) EntryExistsException(org.apache.geode.cache.EntryExistsException) PartitionedRegionDistributionException(org.apache.geode.cache.PartitionedRegionDistributionException) PartitionedRegionStorageException(org.apache.geode.cache.PartitionedRegionStorageException) FunctionException(org.apache.geode.cache.execute.FunctionException) CacheLoaderException(org.apache.geode.cache.CacheLoaderException) NoSuchElementException(java.util.NoSuchElementException) QueryException(org.apache.geode.cache.query.QueryException) PartitionNotAvailableException(org.apache.geode.cache.partition.PartitionNotAvailableException) LowMemoryException(org.apache.geode.cache.LowMemoryException) InternalFunctionInvocationTargetException(org.apache.geode.internal.cache.execute.InternalFunctionInvocationTargetException) IndexInvalidException(org.apache.geode.cache.query.IndexInvalidException) PRLocallyDestroyedException(org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException) RegionExistsException(org.apache.geode.cache.RegionExistsException) CancelException(org.apache.geode.CancelException) DiskAccessException(org.apache.geode.cache.DiskAccessException) CacheWriterException(org.apache.geode.cache.CacheWriterException) TransactionException(org.apache.geode.cache.TransactionException) CacheClosedException(org.apache.geode.cache.CacheClosedException) ConcurrentCacheModificationException(org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException) MultiIndexCreationException(org.apache.geode.cache.query.MultiIndexCreationException) TransactionDataNotColocatedException(org.apache.geode.cache.TransactionDataNotColocatedException) 
EmptyRegionFunctionException(org.apache.geode.cache.execute.EmptyRegionFunctionException)
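
For context, here is a minimal caller-side sketch (not taken from the Geode sources) of how a put on a partitioned region, which funnels into PartitionedRegion.virtualPut above, can surface TimeoutException and CacheWriterException to application code. The class name PartitionedPutExample and the region name "exampleRegion" are illustrative.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.CacheWriterException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.TimeoutException;

public class PartitionedPutExample {

    public static void main(String[] args) {
        Cache cache = new CacheFactory().create();
        Region<String, String> region =
            cache.<String, String>createRegionFactory(RegionShortcut.PARTITION).create("exampleRegion");
        try {
            // Region.put on a partitioned region is what ends up in virtualPut.
            region.put("key", "value");
        } catch (TimeoutException e) {
            // The distributed operation could not complete within its wait threshold.
            System.err.println("put timed out: " + e.getMessage());
        } catch (CacheWriterException e) {
            // A registered CacheWriter vetoed or failed the operation.
            System.err.println("cache writer failed: " + e.getMessage());
        } finally {
            cache.close();
        }
    }
}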

Example 32 with TimeoutException

Use of org.apache.geode.cache.TimeoutException in the Apache Geode project.

The class PartitionedRegion, method getBucketOwnersForValidation.

/**
   * Test Method: Fetch the given bucket's meta-data from each member hosting buckets
   * 
   * @param bucketId the identity of the bucket
   * @return a list of arrays, each containing a {@link DistributedMember} and a {@link Boolean};
   *         the boolean denotes whether the member is hosting the bucket and believes it is the
   *         primary
   * @throws ForceReattemptException if the caller should reattempt this request
   */
public List getBucketOwnersForValidation(int bucketId) throws ForceReattemptException {
    // bucketid 1 => "vm A", false | "vm B", false | "vm C", true | "vm D", false
    // bucketid 2 => List< Tuple(MemberId mem, Boolean isPrimary) >
    // remotely fetch each VM's bucket meta-data (versus looking at the bucket
    // advisor's data
    RuntimeException rte = null;
    List remoteInfos = null;
    for (int i = 0; i < 3; i++) {
        rte = null;
        DumpB2NResponse response = DumpB2NRegion.send(getRegionAdvisor().adviseDataStore(), this, bucketId, true);
        try {
            remoteInfos = new LinkedList(response.waitForPrimaryInfos());
            // success: stop retrying
            break;
        } catch (TimeoutException e) {
            rte = e;
            logger.info("DumpB2NRegion failed to get PR {}, bucket id {}'s info due to {}, retrying...", this.getFullPath(), bucketId, e.getMessage());
        }
    }
    if (rte != null) {
        logger.info("DumpB2NRegion retried 3 times", rte);
        throw rte;
    }
    // Include current VM in the status...
    if (getRegionAdvisor().getBucket(bucketId).isHosting()) {
        if (getRegionAdvisor().isPrimaryForBucket(bucketId)) {
            remoteInfos.add(new Object[] { getSystem().getDM().getId(), Boolean.TRUE, "" });
        } else {
            remoteInfos.add(new Object[] { getSystem().getDM().getId(), Boolean.FALSE, "" });
        }
    }
    return remoteInfos;
}
Also used : VersionedObjectList(org.apache.geode.internal.cache.tier.sockets.VersionedObjectList) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) LinkedList(java.util.LinkedList) ArrayList(java.util.ArrayList) List(java.util.List) DumpB2NResponse(org.apache.geode.internal.cache.partitioned.DumpB2NRegion.DumpB2NResponse) LinkedList(java.util.LinkedList) TimeoutException(org.apache.geode.cache.TimeoutException)
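
The loop above sends the DumpB2NRegion request up to three times before giving up. The standalone sketch below (BoundedRetry and withRetries are illustrative names, not Geode API) factors that bounded-retry pattern out for any operation that may throw TimeoutException.

import java.util.concurrent.Callable;

import org.apache.geode.cache.TimeoutException;

public final class BoundedRetry {

    // Retries the call up to maxAttempts times when it throws TimeoutException,
    // returning the first successful result or rethrowing the last timeout.
    public static <T> T withRetries(Callable<T> call, int maxAttempts) throws Exception {
        if (maxAttempts < 1) {
            throw new IllegalArgumentException("maxAttempts must be at least 1");
        }
        TimeoutException last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                return call.call(); // success: stop retrying immediately
            } catch (TimeoutException e) {
                last = e; // remember the failure and try the next attempt
            }
        }
        throw last; // every attempt timed out
    }
}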

Example 33 with TimeoutException

Use of org.apache.geode.cache.TimeoutException in the Apache Geode project.

The class CacheLoaderTestCase, method testCacheLoader.

/////////////////////// Test Methods ///////////////////////
@Test
public void testCacheLoader() throws CacheException {
    final String name = this.getUniqueName();
    final Object key = this.getUniqueName();
    final Object value = new Integer(42);
    final Object arg = "ARG";
    final String exception = "EXCEPTION";
    TestCacheLoader loader = new TestCacheLoader() {

        public Object load2(LoaderHelper helper) throws CacheLoaderException {
            assertEquals(key, helper.getKey());
            assertEquals(name, helper.getRegion().getName());
            try {
                RegionAttributes attrs = helper.getRegion().getAttributes();
                if (attrs.getScope().isDistributed()) {
                    assertNull(helper.netSearch(false));
                    assertNull(helper.netSearch(true));
                }
            } catch (TimeoutException ex) {
                Assert.fail("Why did I time out?", ex);
            }
            Object argument = helper.getArgument();
            if (argument != null) {
                if (argument.equals(exception)) {
                    String s = "Test Exception";
                    throw new CacheLoaderException(s);
                } else {
                    assertEquals(arg, argument);
                }
            }
            return value;
        }
    };
    AttributesFactory factory = new AttributesFactory(getRegionAttributes());
    factory.setCacheLoader(loader);
    Region region = createRegion(name, factory.create());
    loader.wasInvoked();
    Region.Entry entry = region.getEntry(key);
    assertNull(entry);
    region.create(key, null);
    entry = region.getEntry(key);
    assertNotNull(entry);
    assertNull(entry.getValue());
    assertEquals(value, region.get(key));
    assertTrue(loader.wasInvoked());
    assertEquals(value, region.getEntry(key).getValue());
}
Also used : LoaderHelper(org.apache.geode.cache.LoaderHelper) AttributesFactory(org.apache.geode.cache.AttributesFactory) RegionAttributes(org.apache.geode.cache.RegionAttributes) CacheLoaderException(org.apache.geode.cache.CacheLoaderException) Region(org.apache.geode.cache.Region) TimeoutException(org.apache.geode.cache.TimeoutException) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
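
As a minimal standalone sketch of the same CacheLoader contract (FallbackLoader is an illustrative name, not a Geode class), the loader below tries a netSearch first and falls back to a locally computed default when the search times out. It could be registered on a region with setCacheLoader, just as the test above does through AttributesFactory.

import org.apache.geode.cache.CacheLoader;
import org.apache.geode.cache.CacheLoaderException;
import org.apache.geode.cache.LoaderHelper;
import org.apache.geode.cache.TimeoutException;

public class FallbackLoader implements CacheLoader<String, String> {

    @Override
    public String load(LoaderHelper<String, String> helper) throws CacheLoaderException {
        try {
            // Ask other members for the value before computing one locally.
            Object remote = helper.netSearch(false);
            if (remote != null) {
                return (String) remote;
            }
        } catch (TimeoutException e) {
            // netSearch declares TimeoutException; fall back to the local default on timeout.
        }
        return "default-for-" + helper.getKey();
    }

    @Override
    public void close() {
        // nothing to release
    }
}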

Example 34 with TimeoutException

Use of org.apache.geode.cache.TimeoutException in the Apache Geode project.

The class DirectChannel, method handleAckTimeout.

/**
   * Handles an ack-wait timeout on the given connection, logging alerts and, if the remote member
   * is still in the view, waiting again for the ack.
   * 
   * @param ackTimeout ack wait threshold, in milliseconds
   * @param ackSATimeout severe alert threshold, in milliseconds
   * @param c the connection whose reply is being awaited
   * @param processor the reply processor waiting for the ack
   * @throws ConnectionException if reading the ack from the connection fails
   */
private void handleAckTimeout(long ackTimeout, long ackSATimeout, Connection c, DirectReplyProcessor processor) throws ConnectionException {
    DM dm = getDM();
    Set activeMembers = dm.getDistributionManagerIds();
    // Increment the stat
    dm.getStats().incReplyTimeouts();
    // an alert that will show up in the console
    {
        final StringId msg = LocalizedStrings.DirectChannel_0_SECONDS_HAVE_ELAPSED_WHILE_WAITING_FOR_REPLY_FROM_1_ON_2_WHOSE_CURRENT_MEMBERSHIP_LIST_IS_3;
        final Object[] msgArgs = new Object[] { Long.valueOf(ackTimeout / 1000), c.getRemoteAddress(), dm.getId(), activeMembers };
        logger.warn(LocalizedMessage.create(msg, msgArgs));
        msgArgs[3] = "(omitted)";
        Breadcrumbs.setProblem(msg, msgArgs);
        if (ReplyProcessor21.THROW_EXCEPTION_ON_TIMEOUT) {
            // init the cause to be a TimeoutException so catchers can determine cause
            TimeoutException cause = new TimeoutException(LocalizedStrings.TIMED_OUT_WAITING_FOR_ACKS.toLocalizedString());
            throw new InternalGemFireException(msg.toLocalizedString(msgArgs), cause);
        }
    }
    if (activeMembers.contains(c.getRemoteAddress())) {
        // wait for ack-severe-alert-threshold period first, then wait forever
        if (ackSATimeout > 0) {
            try {
                c.readAck((int) ackSATimeout, ackSATimeout, processor);
                return;
            } catch (SocketTimeoutException e) {
                Object[] args = new Object[] { Long.valueOf((ackSATimeout + ackTimeout) / 1000), c.getRemoteAddress(), dm.getId(), activeMembers };
                logger.fatal(LocalizedMessage.create(LocalizedStrings.DirectChannel_0_SECONDS_HAVE_ELAPSED_WHILE_WAITING_FOR_REPLY_FROM_1_ON_2_WHOSE_CURRENT_MEMBERSHIP_LIST_IS_3, args));
            }
        }
        try {
            c.readAck(0, 0, processor);
        } catch (SocketTimeoutException ex) {
            // this can never happen when called with timeout of 0
            logger.error(LocalizedMessage.create(LocalizedStrings.DirectChannel_UNEXPECTED_TIMEOUT_WHILE_WAITING_FOR_ACK_FROM__0, c.getRemoteAddress()), ex);
        }
    } else {
        logger.warn(LocalizedMessage.create(LocalizedStrings.DirectChannel_VIEW_NO_LONGER_HAS_0_AS_AN_ACTIVE_MEMBER_SO_WE_WILL_NO_LONGER_WAIT_FOR_IT, c.getRemoteAddress()));
        processor.memberDeparted(c.getRemoteAddress(), true);
    }
}
Also used : SocketTimeoutException(java.net.SocketTimeoutException) StringId(org.apache.geode.i18n.StringId) TimeoutException(org.apache.geode.cache.TimeoutException) SocketTimeoutException(java.net.SocketTimeoutException)
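
When ReplyProcessor21.THROW_EXCEPTION_ON_TIMEOUT is set, the code above does not throw the TimeoutException directly; it becomes the cause of an InternalGemFireException. The helper below (TimeoutCauseInspector is an illustrative sketch, not Geode API) shows one way a caller could walk a cause chain to detect that case.

import org.apache.geode.cache.TimeoutException;

public final class TimeoutCauseInspector {

    // Returns the first TimeoutException found in the cause chain, or null if there is none.
    public static TimeoutException findTimeoutCause(Throwable thrown) {
        for (Throwable cur = thrown; cur != null; cur = cur.getCause()) {
            if (cur instanceof TimeoutException) {
                return (TimeoutException) cur;
            }
        }
        return null;
    }
}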

Example 35 with TimeoutException

Use of org.apache.geode.cache.TimeoutException in the Apache Geode project.

The class PartitionedRegion, method destroyInBucket.

/**
   * @param expectedOldValue only succeed if current value is equal to expectedOldValue
   * @throws EntryNotFoundException if entry not found or if expectedOldValue not null and current
   *         value was not equal to expectedOldValue
   */
public void destroyInBucket(final EntryEventImpl event, Object expectedOldValue) throws EntryNotFoundException, CacheWriterException {
    // Get the bucket id for the key
    final Integer bucketId = event.getKeyInfo().getBucketId();
    assert bucketId != KeyInfo.UNKNOWN_BUCKET;
    // check in bucket2Node region
    final InternalDistributedMember targetNode = getOrCreateNodeForBucketWrite(bucketId, null);
    if (logger.isDebugEnabled()) {
        logger.debug("destroyInBucket: key={} ({}) in node {} to bucketId={} retry={} ms", event.getKey(), event.getKey().hashCode(), targetNode, bucketStringForLogs(bucketId), retryTimeout);
    }
    // retry the put remotely until it finds the right node managing the bucket
    RetryTimeKeeper retryTime = null;
    InternalDistributedMember currentTarget = targetNode;
    long timeOut = 0;
    int count = 0;
    for (; ; ) {
        switch(count) {
            case 0:
                // First time, keep going
                break;
            case 1:
                // First failure
                this.cache.getCancelCriterion().checkCancelInProgress(null);
                timeOut = System.currentTimeMillis() + this.retryTimeout;
                break;
            default:
                this.cache.getCancelCriterion().checkCancelInProgress(null);
                // test for timeout
                long timeLeft = timeOut - System.currentTimeMillis();
                if (timeLeft < 0) {
                    PRHARedundancyProvider.timedOut(this, null, null, "destroy an entry", this.retryTimeout);
                // NOTREACHED
                }
                // Didn't time out. Sleep a bit and then continue
                boolean interrupted = Thread.interrupted();
                try {
                    Thread.sleep(PartitionedRegionHelper.DEFAULT_WAIT_PER_RETRY_ITERATION);
                } catch (InterruptedException ignore) {
                    interrupted = true;
                } finally {
                    if (interrupted) {
                        Thread.currentThread().interrupt();
                    }
                }
                break;
        }
        count++;
        if (currentTarget == null) {
            // pick target
            checkReadiness();
            if (retryTime == null) {
                retryTime = new RetryTimeKeeper(this.retryTimeout);
            }
            if (retryTime.overMaximum()) {
                if (getRegionAdvisor().getBucket(bucketId).getBucketAdvisor().basicGetPrimaryMember() == null) {
                    throw new EntryNotFoundException(LocalizedStrings.PartitionedRegion_ENTRY_NOT_FOUND_FOR_KEY_0.toLocalizedString(event.getKey()));
                }
                TimeoutException e = new TimeoutException(LocalizedStrings.PartitionedRegion_TIME_OUT_LOOKING_FOR_TARGET_NODE_FOR_DESTROY_WAITED_0_MS.toLocalizedString(retryTime.getRetryTime()));
                if (logger.isDebugEnabled()) {
                    logger.debug(e.getMessage(), e);
                }
                checkReadiness();
                throw e;
            }
            currentTarget = getOrCreateNodeForBucketWrite(bucketId, retryTime);
            // No storage found for bucket, early out preventing hot loop, bug 36819
            if (currentTarget == null) {
                checkEntryNotFound(event.getKey());
            }
            continue;
        }
        // pick target
        final boolean isLocal = (this.localMaxMemory > 0) && currentTarget.equals(getMyId());
        try {
            DistributedRemoveAllOperation savedOp = event.setRemoveAllOperation(null);
            if (savedOp != null) {
                savedOp.addEntry(event, bucketId);
                return;
            }
            if (isLocal) {
                // doCacheWriteBeforeDestroy(event);
                event.setInvokePRCallbacks(true);
                this.dataStore.destroyLocally(bucketId, event, expectedOldValue);
            } else {
                if (event.isBridgeEvent()) {
                    setNetworkHopType(bucketId, currentTarget);
                }
                destroyRemotely(currentTarget, bucketId, event, expectedOldValue);
            }
            return;
        // NOTREACHED (success)
        } catch (ConcurrentCacheModificationException e) {
            if (logger.isDebugEnabled()) {
                logger.debug("destroyInBucket: caught concurrent cache modification exception", e);
            }
            event.isConcurrencyConflict(true);
            if (logger.isTraceEnabled()) {
                logger.trace("ConcurrentCacheModificationException received for destroyInBucket for bucketId: {}{}{} for event: {} No reattempt is done, returning from here", getPRId(), BUCKET_ID_SEPARATOR, bucketId, event);
            }
            return;
        } catch (ForceReattemptException e) {
            e.checkKey(event.getKey());
            // We don't know if the destroy took place or not at this point.
            // Assume that if the next destroy throws EntryDestroyedException, the
            // previous destroy attempt was a success
            checkReadiness();
            InternalDistributedMember lastNode = currentTarget;
            if (retryTime == null) {
                retryTime = new RetryTimeKeeper(this.retryTimeout);
            }
            currentTarget = getOrCreateNodeForBucketWrite(bucketId, retryTime);
            event.setPossibleDuplicate(true);
            if (lastNode.equals(currentTarget)) {
                if (retryTime.overMaximum()) {
                    PRHARedundancyProvider.timedOut(this, null, null, "destroy an entry", retryTime.getRetryTime());
                }
                retryTime.waitToRetryNode();
            }
        } catch (PrimaryBucketException notPrimary) {
            if (logger.isDebugEnabled()) {
                logger.debug("destroyInBucket: {} on Node {} not primary", notPrimary.getLocalizedMessage(), currentTarget);
            }
            getRegionAdvisor().notPrimary(bucketId, currentTarget);
            if (retryTime == null) {
                retryTime = new RetryTimeKeeper(this.retryTimeout);
            }
            currentTarget = getOrCreateNodeForBucketWrite(bucketId, retryTime);
        }
        // If we get here, the attempt failed.
        if (count == 1) {
            this.prStats.incDestroyOpsRetried();
        }
        this.prStats.incDestroyRetries();
        if (logger.isDebugEnabled()) {
            logger.debug("destroyInBucket: Attempting to resend destroy to node {} after {} failed attempts", currentTarget, count);
        }
    }
// for
}
Also used : ConcurrentCacheModificationException(org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) TimeoutException(org.apache.geode.cache.TimeoutException)
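
From the application's point of view, the timeout raised in destroyInBucket surfaces through Region.destroy. The sketch below (the class and tryDestroy method are illustrative, not Geode API) converts the two expected failure modes into a boolean result instead of letting them propagate.

import org.apache.geode.cache.EntryNotFoundException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.TimeoutException;

public final class DestroyWithTimeoutHandling {

    // Destroys the entry, reporting whether the destroy is known to be complete.
    public static boolean tryDestroy(Region<String, String> region, String key) {
        try {
            region.destroy(key); // may end up in PartitionedRegion.destroyInBucket above
            return true;
        } catch (EntryNotFoundException e) {
            return true; // nothing to remove; treat as already done
        } catch (TimeoutException e) {
            return false; // no target node could be found within the retry timeout
        }
    }
}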

Aggregations

TimeoutException (org.apache.geode.cache.TimeoutException): 48
Test (org.junit.Test): 24
CacheException (org.apache.geode.cache.CacheException): 22
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 21
Host (org.apache.geode.test.dunit.Host): 20
VM (org.apache.geode.test.dunit.VM): 20
Region (org.apache.geode.cache.Region): 18
AttributesFactory (org.apache.geode.cache.AttributesFactory): 17
CacheLoaderException (org.apache.geode.cache.CacheLoaderException): 14
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable): 13
CacheWriterException (org.apache.geode.cache.CacheWriterException): 10
LoaderHelper (org.apache.geode.cache.LoaderHelper): 10
Lock (java.util.concurrent.locks.Lock): 8
EntryNotFoundException (org.apache.geode.cache.EntryNotFoundException): 6
InternalDistributedMember (org.apache.geode.distributed.internal.membership.InternalDistributedMember): 6
DLockTest (org.apache.geode.test.junit.categories.DLockTest): 6
IOException (java.io.IOException): 5
InternalGemFireError (org.apache.geode.InternalGemFireError): 5
CacheWriter (org.apache.geode.cache.CacheWriter): 5
StringId (org.apache.geode.i18n.StringId): 5