
Example 61 with GridCacheEntryRemovedException

use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.

the class GridDhtAtomicCache method updateSingle.

/**
 * Updates locked entries one-by-one.
 *
 * @param nearNode Originating node.
 * @param hasNear {@code True} if originating node has near cache.
 * @param req Update request.
 * @param res Update response.
 * @param locked Locked entries.
 * @param ver Assigned update version.
 * @param dhtFut Optional DHT future.
 * @param replicate Whether DR is enabled for that cache.
 * @param taskName Task name.
 * @param expiry Expiry policy.
 * @param sndPrevVal If {@code true} sends previous value to backups.
 * @return Return value.
 * @throws GridCacheEntryRemovedException Should never be thrown.
 */
private DhtAtomicUpdateResult updateSingle(ClusterNode nearNode, boolean hasNear, GridNearAtomicAbstractUpdateRequest req, GridNearAtomicUpdateResponse res, List<GridDhtCacheEntry> locked, GridCacheVersion ver, @Nullable GridDhtAtomicAbstractUpdateFuture dhtFut, boolean replicate, String taskName, @Nullable IgniteCacheExpiryPolicy expiry, boolean sndPrevVal) throws GridCacheEntryRemovedException {
    GridCacheReturn retVal = null;
    Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted = null;
    AffinityTopologyVersion topVer = req.topologyVersion();
    boolean intercept = ctx.config().getInterceptor() != null;
    AffinityAssignment affAssignment = ctx.affinity().assignment(topVer);
    // Avoid iterator creation.
    for (int i = 0; i < req.size(); i++) {
        KeyCacheObject k = req.key(i);
        GridCacheOperation op = req.operation();
        // No GridCacheEntryRemovedException can be thrown.
        try {
            GridDhtCacheEntry entry = locked.get(i);
            GridCacheVersion newConflictVer = req.conflictVersion(i);
            long newConflictTtl = req.conflictTtl(i);
            long newConflictExpireTime = req.conflictExpireTime(i);
            assert !(newConflictVer instanceof GridCacheVersionEx) : newConflictVer;
            Object writeVal = op == TRANSFORM ? req.entryProcessor(i) : req.writeValue(i);
            // Get readers before innerUpdate (reader cleared after remove).
            GridDhtCacheEntry.ReaderId[] readers = entry.readersLocked();
            GridCacheUpdateAtomicResult updRes = entry.innerUpdate(ver, nearNode.id(), locNodeId, op, writeVal, req.invokeArguments(), writeThrough() && !req.skipStore(), !req.skipStore(), sndPrevVal || req.returnValue(), req.keepBinary(), expiry,
                /*event*/true,
                /*metrics*/true,
                /*primary*/true,
                /*verCheck*/false,
                topVer, req.filter(), replicate ? DR_PRIMARY : DR_NONE, newConflictTtl, newConflictExpireTime, newConflictVer,
                /*conflictResolve*/true,
                intercept, req.subjectId(), taskName,
                /*prevVal*/null,
                /*updateCntr*/null,
                dhtFut);
            if (dhtFut != null) {
                if (updRes.sendToDht()) {
                    // Send to backups even in case of remove-remove scenarios.
                    GridCacheVersionConflictContext<?, ?> conflictCtx = updRes.conflictResolveResult();
                    if (conflictCtx == null)
                        newConflictVer = null;
                    else if (conflictCtx.isMerge())
                        // Conflict version is discarded in case of merge.
                        newConflictVer = null;
                    EntryProcessor<Object, Object, Object> entryProcessor = null;
                    dhtFut.addWriteEntry(affAssignment, entry, updRes.newValue(), entryProcessor, updRes.newTtl(), updRes.conflictExpireTime(), newConflictVer, sndPrevVal, updRes.oldValue(), updRes.updateCounter());
                    if (readers != null)
                        dhtFut.addNearWriteEntries(nearNode, readers, entry, updRes.newValue(), entryProcessor, updRes.newTtl(), updRes.conflictExpireTime());
                } else {
                    if (log.isDebugEnabled())
                        log.debug("Entry did not pass the filter or conflict resolution (will skip write) " + "[entry=" + entry + ", filter=" + Arrays.toString(req.filter()) + ']');
                }
            }
            if (hasNear) {
                if (updRes.sendToDht()) {
                    if (!ctx.affinity().partitionBelongs(nearNode, entry.partition(), topVer)) {
                        // If put the same value as in request then do not need to send it back.
                        if (op == TRANSFORM || writeVal != updRes.newValue()) {
                            res.addNearValue(i, updRes.newValue(), updRes.newTtl(), updRes.conflictExpireTime());
                        } else
                            res.addNearTtl(i, updRes.newTtl(), updRes.conflictExpireTime());
                        if (updRes.newValue() != null) {
                            IgniteInternalFuture<Boolean> f = entry.addReader(nearNode.id(), req.messageId(), topVer);
                            assert f == null : f;
                        }
                    } else if (GridDhtCacheEntry.ReaderId.contains(readers, nearNode.id())) {
                        // Reader became primary or backup.
                        entry.removeReader(nearNode.id(), req.messageId());
                    } else
                        res.addSkippedIndex(i);
                } else
                    res.addSkippedIndex(i);
            }
            if (updRes.removeVersion() != null) {
                if (deleted == null)
                    deleted = new ArrayList<>(req.size());
                deleted.add(F.t(entry, updRes.removeVersion()));
            }
            if (op == TRANSFORM) {
                assert !req.returnValue();
                IgniteBiTuple<Object, Exception> compRes = updRes.computedResult();
                if (compRes != null && (compRes.get1() != null || compRes.get2() != null)) {
                    if (retVal == null)
                        retVal = new GridCacheReturn(nearNode.isLocal());
                    retVal.addEntryProcessResult(ctx, k, null, compRes.get1(), compRes.get2(), req.keepBinary());
                }
            } else {
                // Create only once.
                if (retVal == null) {
                    CacheObject ret = updRes.oldValue();
                    retVal = new GridCacheReturn(ctx, nearNode.isLocal(), req.keepBinary(), req.returnValue() ? ret : null, updRes.success());
                }
            }
        } catch (IgniteCheckedException e) {
            res.addFailedKey(k, e);
        }
    }
    return new DhtAtomicUpdateResult(retVal, deleted, dhtFut);
}
Also used : AffinityAssignment(org.apache.ignite.internal.processors.affinity.AffinityAssignment) IgniteBiTuple(org.apache.ignite.lang.IgniteBiTuple) ArrayList(java.util.ArrayList) GridCacheVersion(org.apache.ignite.internal.processors.cache.version.GridCacheVersion) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) CacheObject(org.apache.ignite.internal.processors.cache.CacheObject) KeyCacheObject(org.apache.ignite.internal.processors.cache.KeyCacheObject) GridCacheReturn(org.apache.ignite.internal.processors.cache.GridCacheReturn) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) IgniteException(org.apache.ignite.IgniteException) StorageException(org.apache.ignite.internal.pagemem.wal.StorageException) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException) IgniteOutOfMemoryException(org.apache.ignite.internal.mem.IgniteOutOfMemoryException) NodeStoppingException(org.apache.ignite.internal.NodeStoppingException) ClusterTopologyCheckedException(org.apache.ignite.internal.cluster.ClusterTopologyCheckedException) CacheStoppedException(org.apache.ignite.internal.processors.cache.CacheStoppedException) CacheStorePartialUpdateException(org.apache.ignite.internal.processors.cache.CacheStorePartialUpdateException) GridDhtInvalidPartitionException(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException) GridCacheVersionEx(org.apache.ignite.internal.processors.cache.version.GridCacheVersionEx) GridDhtCacheEntry(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry) GridTimeoutObject(org.apache.ignite.internal.processors.timeout.GridTimeoutObject) GridCacheOperation(org.apache.ignite.internal.processors.cache.GridCacheOperation) GridCacheUpdateAtomicResult(org.apache.ignite.internal.processors.cache.GridCacheUpdateAtomicResult)
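
The contract worth noticing in this example is that updateSingle only ever touches entries that were locked beforehand, which is why GridCacheEntryRemovedException is declared on the signature yet documented as never thrown. Below is a minimal sketch of that lock-first pattern; LockedEntry, RemovedException and updateAll are hypothetical stand-ins for illustration, not Ignite's internal API.

import java.util.List;

/** Illustrative stand-in for GridCacheEntryRemovedException; not an Ignite class. */
class RemovedException extends Exception {
}

/** Hypothetical entry that is only mutated while the caller holds its lock. */
class LockedEntry {
    private final String key;
    private Object val;

    LockedEntry(String key) {
        this.key = key;
    }

    /** Would throw if the entry turned obsolete; impossible while the lock is held. */
    void update(Object newVal) throws RemovedException {
        this.val = newVal;
    }

    String key() {
        return key;
    }
}

public class LockFirstUpdate {
    /** Mirrors the updateSingle contract: every entry was locked before this loop runs. */
    static void updateAll(List<LockedEntry> locked, List<?> vals) {
        for (int i = 0; i < locked.size(); i++) {
            try {
                locked.get(i).update(vals.get(i));
            }
            catch (RemovedException e) {
                // Same stance as the Ignite code: under lock this must never happen.
                throw new AssertionError("Entry removed while locked: " + locked.get(i).key(), e);
            }
        }
    }

    public static void main(String[] args) {
        updateAll(List.of(new LockedEntry("k1")), List.of("v1"));
        System.out.println("updated all locked entries");
    }
}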

Example 62 with GridCacheEntryRemovedException

use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.

the class GridDhtAtomicCache method reloadIfNeeded.

/**
 * @param entries Entries.
 * @throws IgniteCheckedException If failed.
 */
private void reloadIfNeeded(final List<GridDhtCacheEntry> entries) throws IgniteCheckedException {
    Map<KeyCacheObject, Integer> needReload = null;
    for (int i = 0; i < entries.size(); i++) {
        GridDhtCacheEntry entry = entries.get(i);
        if (entry == null)
            continue;
        CacheObject val = entry.rawGet();
        if (val == null) {
            if (needReload == null)
                needReload = new HashMap<>(entries.size(), 1.0f);
            needReload.put(entry.key(), i);
        }
    }
    if (needReload != null) {
        final Map<KeyCacheObject, Integer> idxMap = needReload;
        ctx.store().loadAll(null, needReload.keySet(), new CI2<KeyCacheObject, Object>() {

            @Override
            public void apply(KeyCacheObject k, Object v) {
                Integer idx = idxMap.get(k);
                if (idx != null) {
                    GridDhtCacheEntry entry = entries.get(idx);
                    try {
                        GridCacheVersion ver = entry.version();
                        entry.versionedValue(ctx.toCacheObject(v), null, ver, null, null);
                    } catch (GridCacheEntryRemovedException e) {
                        assert false : "Entry should not get obsolete while holding lock [entry=" + entry + ", e=" + e + ']';
                    } catch (IgniteCheckedException e) {
                        throw new IgniteException(e);
                    }
                }
            }
        });
    }
}
Also used : LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) GridCacheVersion(org.apache.ignite.internal.processors.cache.version.GridCacheVersion) GridDhtCacheEntry(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IgniteException(org.apache.ignite.IgniteException) CacheObject(org.apache.ignite.internal.processors.cache.CacheObject) KeyCacheObject(org.apache.ignite.internal.processors.cache.KeyCacheObject) GridTimeoutObject(org.apache.ignite.internal.processors.timeout.GridTimeoutObject) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException)
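
reloadIfNeeded builds a key-to-index map so the store's load callback can put each value back into the right slot, and a removed entry under the held lock is again treated as an assertion failure. Here is a compact sketch of the index-map part with hypothetical names and a generic loader standing in for ctx.store().loadAll(...).

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.BiConsumer;

public class ReloadSketch {
    /** Reloads null slots from a hypothetical store, mapping keys back to their list positions. */
    static void reloadIfNeeded(List<String> keys, List<Object> vals,
        BiConsumer<Iterable<String>, BiConsumer<String, Object>> store) {
        Map<String, Integer> needReload = null;

        for (int i = 0; i < vals.size(); i++) {
            if (vals.get(i) == null) {
                if (needReload == null)
                    needReload = new HashMap<>(vals.size(), 1.0f);

                needReload.put(keys.get(i), i);
            }
        }

        if (needReload != null) {
            final Map<String, Integer> idxMap = needReload;

            // The store calls back once per loaded (key, value) pair, like loadAll in the example.
            store.accept(idxMap.keySet(), (k, v) -> {
                Integer idx = idxMap.get(k);

                if (idx != null)
                    vals.set(idx, v); // In Ignite this is entry.versionedValue(...) under the held lock.
            });
        }
    }
}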

Example 63 with GridCacheEntryRemovedException

use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.

the class GridDhtAtomicCache method processDhtAtomicUpdateRequest.

/**
 * @param nodeId Sender node ID.
 * @param req Dht atomic update request.
 */
private void processDhtAtomicUpdateRequest(UUID nodeId, GridDhtAtomicAbstractUpdateRequest req) {
    assert Thread.currentThread().getName().startsWith("sys-stripe-") : Thread.currentThread().getName();
    if (msgLog.isDebugEnabled()) {
        msgLog.debug("Received DHT atomic update request [futId=" + req.futureId() + ", writeVer=" + req.writeVersion() + ", node=" + nodeId + ']');
    }
    assert req.partition() >= 0 : req;
    GridCacheVersion ver = req.writeVersion();
    GridDhtAtomicNearResponse nearRes = null;
    if (req.nearNodeId() != null) {
        nearRes = new GridDhtAtomicNearResponse(ctx.cacheId(), req.partition(), req.nearFutureId(), nodeId, req.flags());
    }
    boolean replicate = ctx.isDrEnabled();
    boolean intercept = req.forceTransformBackups() && ctx.config().getInterceptor() != null;
    String taskName = ctx.kernalContext().task().resolveTaskName(req.taskNameHash());
    ctx.shared().database().checkpointReadLock();
    try {
        for (int i = 0; i < req.size(); i++) {
            KeyCacheObject key = req.key(i);
            try {
                while (true) {
                    GridDhtCacheEntry entry = null;
                    try {
                        entry = entryExx(key);
                        CacheObject val = req.value(i);
                        CacheObject prevVal = req.previousValue(i);
                        EntryProcessor<Object, Object, Object> entryProcessor = req.entryProcessor(i);
                        Long updateIdx = req.updateCounter(i);
                        GridCacheOperation op = entryProcessor != null ? TRANSFORM : (val != null) ? UPDATE : DELETE;
                        long ttl = req.ttl(i);
                        long expireTime = req.conflictExpireTime(i);
                        GridCacheUpdateAtomicResult updRes = entry.innerUpdate(ver, nodeId, nodeId, op, op == TRANSFORM ? entryProcessor : val, op == TRANSFORM ? req.invokeArguments() : null,
                            /*write-through*/(ctx.store().isLocal() && !ctx.shared().localStorePrimaryOnly()) && writeThrough() && !req.skipStore(),
                            /*read-through*/false,
                            /*retval*/false,
                            req.keepBinary(),
                            /*expiry policy*/null,
                            /*event*/true,
                            /*metrics*/true,
                            /*primary*/false,
                            /*check version*/!req.forceTransformBackups(),
                            req.topologyVersion(), CU.empty0(), replicate ? DR_BACKUP : DR_NONE, ttl, expireTime, req.conflictVersion(i), false, intercept, req.subjectId(), taskName, prevVal, updateIdx, null);
                        if (updRes.removeVersion() != null)
                            ctx.onDeferredDelete(entry, updRes.removeVersion());
                        entry.onUnlock();
                        // While.
                        break;
                    } catch (GridCacheEntryRemovedException ignored) {
                        if (log.isDebugEnabled())
                            log.debug("Got removed entry while updating backup value (will retry): " + key);
                        entry = null;
                    } finally {
                        if (entry != null)
                            ctx.evicts().touch(entry, req.topologyVersion());
                    }
                }
            } catch (NodeStoppingException e) {
                U.warn(log, "Failed to update key on backup (local node is stopping): " + key);
                return;
            } catch (GridDhtInvalidPartitionException ignored) {
            // Ignore.
            } catch (IgniteCheckedException | RuntimeException e) {
                if (e instanceof RuntimeException && !X.hasCause(e, IgniteOutOfMemoryException.class))
                    throw (RuntimeException) e;
                IgniteCheckedException err = new IgniteCheckedException("Failed to update key on backup node: " + key, e);
                if (nearRes != null)
                    nearRes.addFailedKey(key, err);
                U.error(log, "Failed to update key on backup node: " + key, e);
            }
        }
    } finally {
        ctx.shared().database().checkpointReadUnlock();
    }
    GridDhtAtomicUpdateResponse dhtRes = null;
    if (req.nearSize() > 0) {
        List<KeyCacheObject> nearEvicted;
        if (isNearEnabled(ctx))
            nearEvicted = ((GridNearAtomicCache<K, V>) near()).processDhtAtomicUpdateRequest(nodeId, req, nearRes);
        else {
            nearEvicted = new ArrayList<>(req.nearSize());
            for (int i = 0; i < req.nearSize(); i++) nearEvicted.add(req.nearKey(i));
        }
        if (nearEvicted != null) {
            dhtRes = new GridDhtAtomicUpdateResponse(ctx.cacheId(), req.partition(), req.futureId(), ctx.deploymentEnabled());
            dhtRes.nearEvicted(nearEvicted);
        }
    }
    try {
        // TODO fire events only after successful fsync
        if (ctx.shared().wal() != null)
            ctx.shared().wal().fsync(null);
    } catch (StorageException e) {
        if (dhtRes != null)
            dhtRes.onError(new IgniteCheckedException(e));
        if (nearRes != null)
            nearRes.onClassError(e);
    } catch (IgniteCheckedException e) {
        if (dhtRes != null)
            dhtRes.onError(e);
        if (nearRes != null)
            nearRes.onClassError(e);
    }
    if (nearRes != null)
        sendDhtNearResponse(req, nearRes);
    if (dhtRes == null && req.replyWithoutDelay()) {
        dhtRes = new GridDhtAtomicUpdateResponse(ctx.cacheId(), req.partition(), req.futureId(), ctx.deploymentEnabled());
    }
    if (dhtRes != null)
        sendDhtPrimaryResponse(nodeId, req, dhtRes);
    else
        sendDeferredUpdateResponse(req.partition(), nodeId, req.futureId());
}
Also used : NodeStoppingException(org.apache.ignite.internal.NodeStoppingException) GridCacheVersion(org.apache.ignite.internal.processors.cache.version.GridCacheVersion) GridNearAtomicCache(org.apache.ignite.internal.processors.cache.distributed.near.GridNearAtomicCache) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException) CacheObject(org.apache.ignite.internal.processors.cache.CacheObject) KeyCacheObject(org.apache.ignite.internal.processors.cache.KeyCacheObject) GridDhtInvalidPartitionException(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException) IgniteOutOfMemoryException(org.apache.ignite.internal.mem.IgniteOutOfMemoryException) GridDhtCacheEntry(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry) GridTimeoutObject(org.apache.ignite.internal.processors.timeout.GridTimeoutObject) GridCacheOperation(org.apache.ignite.internal.processors.cache.GridCacheOperation) GridCacheUpdateAtomicResult(org.apache.ignite.internal.processors.cache.GridCacheUpdateAtomicResult) StorageException(org.apache.ignite.internal.pagemem.wal.StorageException)
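
This backup-update path shows the usual Ignite idiom for handling GridCacheEntryRemovedException on an entry that is not pre-locked: loop, re-acquire the entry, and retry until the update sticks. The stripped-down sketch below mirrors that retry loop; EntryHandle, entryFor and updateBackupValue are hypothetical names, not Ignite's internals.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class RetryOnRemoved {
    /** Illustrative stand-in for GridCacheEntryRemovedException; not an Ignite class. */
    static class EntryRemovedException extends Exception {
    }

    /** Hypothetical entry handle that can turn obsolete between lookup and update. */
    interface EntryHandle {
        void update(String val) throws EntryRemovedException;
    }

    static final Map<String, String> CACHE = new ConcurrentHashMap<>();

    /** A fresh handle is obtained on every attempt, like entryExx(key) inside the loop above. */
    static EntryHandle entryFor(String key) {
        return val -> CACHE.put(key, val);
    }

    static void updateBackupValue(String key, String val) {
        while (true) {
            EntryHandle entry = entryFor(key);

            try {
                entry.update(val);

                break; // Success: leave the retry loop, mirroring the "// While." break.
            }
            catch (EntryRemovedException ignored) {
                // Entry went obsolete between lookup and update: loop and re-acquire it.
            }
        }
    }

    public static void main(String[] args) {
        updateBackupValue("k1", "v1");
        System.out.println(CACHE);
    }
}

The detail carried over from the original is that the entry is re-acquired inside the loop, so each retry works on a fresh entry rather than on the obsolete one caught by the exception.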

Example 64 with GridCacheEntryRemovedException

use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.

the class GridDhtAtomicCache method updateAllAsyncInternal0.

/**
 * Executes local update after preloader fetched values.
 *
 * @param node Node.
 * @param req Update request.
 * @param completionCb Completion callback.
 */
private void updateAllAsyncInternal0(ClusterNode node, GridNearAtomicAbstractUpdateRequest req, UpdateReplyClosure completionCb) {
    GridNearAtomicUpdateResponse res = new GridNearAtomicUpdateResponse(ctx.cacheId(), node.id(), req.futureId(), req.partition(), false, ctx.deploymentEnabled());
    assert !req.returnValue() || (req.operation() == TRANSFORM || req.size() == 1);
    GridDhtAtomicAbstractUpdateFuture dhtFut = null;
    IgniteCacheExpiryPolicy expiry = null;
    ctx.shared().database().checkpointReadLock();
    try {
        ctx.shared().database().ensureFreeSpace(ctx.dataRegion());
        // If batch store update is enabled, we need to lock all entries.
        // First, need to acquire locks on cache entries, then check filter.
        List<GridDhtCacheEntry> locked = lockEntries(req, req.topologyVersion());
        Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted = null;
        try {
            GridDhtPartitionTopology top = topology();
            top.readLock();
            try {
                if (top.stopping()) {
                    res.addFailedKeys(req.keys(), new CacheStoppedException(name()));
                    completionCb.apply(req, res);
                    return;
                }
                boolean remap = false;
                // Do not check the topology version if the topology was locked on the near node
                // by an external transaction or explicit lock.
                if (!req.topologyLocked()) {
                    // Can not wait for topology future since it will break
                    // GridNearAtomicCheckUpdateRequest processing.
                    remap = !top.topologyVersionFuture().exchangeDone() || needRemap(req.topologyVersion(), top.readyTopologyVersion());
                }
                if (!remap) {
                    DhtAtomicUpdateResult updRes = update(node, locked, req, res);
                    dhtFut = updRes.dhtFuture();
                    deleted = updRes.deleted();
                    expiry = updRes.expiryPolicy();
                } else
                    // Should remap all keys.
                    res.remapTopologyVersion(top.lastTopologyChangeVersion());
            } finally {
                top.readUnlock();
            }
        } catch (GridCacheEntryRemovedException e) {
            assert false : "Entry should not become obsolete while holding lock.";
            e.printStackTrace();
        } finally {
            if (locked != null)
                unlockEntries(locked, req.topologyVersion());
            // Enqueue if necessary after locks release.
            if (deleted != null) {
                assert !deleted.isEmpty();
                assert ctx.deferredDelete() : this;
                for (IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion> e : deleted) ctx.onDeferredDelete(e.get1(), e.get2());
            }
            // TODO fire events only after successful fsync
            if (ctx.shared().wal() != null)
                ctx.shared().wal().fsync(null);
        }
    } catch (GridDhtInvalidPartitionException ignore) {
        if (log.isDebugEnabled())
            log.debug("Caught invalid partition exception for cache entry (will remap update request): " + req);
        res.remapTopologyVersion(ctx.topology().lastTopologyChangeVersion());
    } catch (Throwable e) {
        // At least RuntimeException can be thrown by the code above when GridCacheContext is cleaned and there is
        // an attempt to use cleaned resources.
        U.error(log, "Unexpected exception during cache update", e);
        res.addFailedKeys(req.keys(), e);
        completionCb.apply(req, res);
        if (e instanceof Error)
            throw (Error) e;
        return;
    } finally {
        ctx.shared().database().checkpointReadUnlock();
    }
    if (res.remapTopologyVersion() != null) {
        assert dhtFut == null;
        completionCb.apply(req, res);
    } else {
        if (dhtFut != null)
            dhtFut.map(node, res.returnValue(), res, completionCb);
    }
    if (req.writeSynchronizationMode() != FULL_ASYNC)
        req.cleanup(!node.isLocal());
    sendTtlUpdateRequest(expiry);
}
Also used : GridDhtInvalidPartitionException(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException) CacheStoppedException(org.apache.ignite.internal.processors.cache.CacheStoppedException) IgniteBiTuple(org.apache.ignite.lang.IgniteBiTuple) GridDhtPartitionTopology(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology) GridCacheVersion(org.apache.ignite.internal.processors.cache.version.GridCacheVersion) GridDhtCacheEntry(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException) IgniteCacheExpiryPolicy(org.apache.ignite.internal.processors.cache.IgniteCacheExpiryPolicy)
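
updateAllAsyncInternal0 layers three scopes: a checkpoint read lock around the whole operation, per-entry locks released in a finally block, and deferred deletes that are enqueued only after those locks are gone. The sketch below shows just that ordering with plain java.util.concurrent locks; the class and method names are illustrative, not Ignite's.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LockOrderingSketch {
    /** Stand-in for the checkpoint read lock taken around the whole update. */
    static final ReadWriteLock CHECKPOINT = new ReentrantReadWriteLock();

    static void updateAll(List<ReentrantLock> entryLocks, List<Runnable> updates) {
        CHECKPOINT.readLock().lock(); // outermost scope, like checkpointReadLock()

        try {
            List<Runnable> deferred = new ArrayList<>();

            for (ReentrantLock l : entryLocks)
                l.lock(); // lock all entries before touching any of them, like lockEntries(...)

            try {
                for (Runnable u : updates)
                    u.run(); // the actual updates; removal is impossible while the locks are held

                deferred.add(() -> System.out.println("deferred delete"));
            }
            finally {
                for (ReentrantLock l : entryLocks)
                    l.unlock(); // release entry locks first, like unlockEntries(...)

                for (Runnable d : deferred)
                    d.run(); // then run deferred cleanup, like ctx.onDeferredDelete(...)
            }
        }
        finally {
            CHECKPOINT.readLock().unlock(); // matches checkpointReadUnlock() in the outer finally
        }
    }
}

The constraint the sketch preserves is the one stated by the original comment "Enqueue if necessary after locks release": deferred deletes run only once every entry lock has been released.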

Example 65 with GridCacheEntryRemovedException

use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.

the class GridDhtForceKeysFuture method map.

/**
 * @param key Key.
 * @param exc Exclude nodes.
 * @param mappings Mappings.
 * @return Mappings.
 */
private Map<ClusterNode, Set<KeyCacheObject>> map(KeyCacheObject key, @Nullable Map<ClusterNode, Set<KeyCacheObject>> mappings, Collection<ClusterNode> exc) {
    ClusterNode loc = cctx.localNode();
    GridCacheEntryEx e = cctx.dht().peekEx(key);
    try {
        if (e != null && !e.isNewLocked()) {
            if (log.isDebugEnabled()) {
                int part = cctx.affinity().partition(key);
                log.debug("Will not rebalance key (entry is not new) [cacheName=" + cctx.name() + ", key=" + key + ", part=" + part + ", locId=" + cctx.nodeId() + ']');
            }
            // Key has been rebalanced or retrieved already.
            return mappings;
        }
    } catch (GridCacheEntryRemovedException ignore) {
        if (log.isDebugEnabled())
            log.debug("Received removed DHT entry for force keys request [entry=" + e + ", locId=" + cctx.nodeId() + ']');
    }
    int part = cctx.affinity().partition(key);
    List<ClusterNode> owners = F.isEmpty(exc) ? top.owners(part, topVer) : new ArrayList<>(F.view(top.owners(part, topVer), F.notIn(exc)));
    if (owners.isEmpty() || (owners.contains(loc) && cctx.rebalanceEnabled())) {
        if (log.isDebugEnabled())
            log.debug("Will not rebalance key (local node is owner) [key=" + key + ", part=" + part + "topVer=" + topVer + ", locId=" + cctx.nodeId() + ']');
        // Key is already rebalanced.
        return mappings;
    }
    // Create partition.
    GridDhtLocalPartition locPart = top.localPartition(part, topVer, false);
    if (log.isDebugEnabled())
        log.debug("Mapping local partition [loc=" + cctx.localNodeId() + ", topVer" + topVer + ", part=" + locPart + ", owners=" + owners + ", allOwners=" + U.toShortString(top.owners(part)) + ']');
    if (locPart == null)
        invalidParts.add(part);
    else if (!cctx.rebalanceEnabled() || locPart.state() == MOVING) {
        Collections.sort(owners, CU.nodeComparator(false));
        // Load from youngest owner.
        ClusterNode pick = F.first(owners);
        assert pick != null;
        if (!cctx.rebalanceEnabled() && loc.id().equals(pick.id()))
            pick = F.first(F.view(owners, F.remoteNodes(loc.id())));
        if (pick == null) {
            if (log.isDebugEnabled())
                log.debug("Will not rebalance key (no nodes to request from with rebalancing disabled) [key=" + key + ", part=" + part + ", locId=" + cctx.nodeId() + ']');
            return mappings;
        }
        if (mappings == null)
            mappings = U.newHashMap(keys.size());
        Collection<KeyCacheObject> mappedKeys = F.addIfAbsent(mappings, pick, F.<KeyCacheObject>newSet());
        assert mappedKeys != null;
        mappedKeys.add(key);
        if (log.isDebugEnabled())
            log.debug("Will rebalance key from node [cacheName=" + cctx.name() + ", key=" + key + ", part=" + part + ", node=" + pick.id() + ", locId=" + cctx.nodeId() + ']');
    } else if (locPart.state() != OWNING)
        invalidParts.add(part);
    else {
        if (log.isDebugEnabled())
            log.debug("Will not rebalance key (local partition is not MOVING) [cacheName=" + cctx.name() + ", key=" + key + ", part=" + locPart + ", locId=" + cctx.nodeId() + ']');
    }
    return mappings;
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) GridCacheEntryEx(org.apache.ignite.internal.processors.cache.GridCacheEntryEx) Collection(java.util.Collection) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException) GridDhtLocalPartition(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition) KeyCacheObject(org.apache.ignite.internal.processors.cache.KeyCacheObject)
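
In GridDhtForceKeysFuture.map the exception is purely informational: a removed entry simply means the key is not held locally, so mapping proceeds as if peekEx had returned null. A tiny sketch of treating the exception as an "absent" signal follows, with hypothetical names in place of the Ignite types.

public class PeekOrAbsent {
    /** Illustrative stand-in for GridCacheEntryRemovedException; not an Ignite class. */
    static class RemovedEntryException extends Exception {
    }

    /** Hypothetical peekable entry that may be observed while it is being concurrently removed. */
    interface Peekable {
        Object peek() throws RemovedEntryException;
    }

    /** Returns the peeked value, or null when the entry is absent or was just removed. */
    static Object peekOrNull(Peekable entry) {
        if (entry == null)
            return null;

        try {
            return entry.peek();
        }
        catch (RemovedEntryException ignore) {
            // Same treatment as the force-keys future: a removed entry means "not here",
            // so the key still gets mapped to a remote owner rather than failing.
            return null;
        }
    }

    public static void main(String[] args) {
        System.out.println(peekOrNull(null)); // prints: null
        System.out.println(peekOrNull(() -> "cached")); // prints: cached
    }
}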

Aggregations

GridCacheEntryRemovedException (org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException) 84
KeyCacheObject (org.apache.ignite.internal.processors.cache.KeyCacheObject) 56
IgniteCheckedException (org.apache.ignite.IgniteCheckedException) 50
CacheObject (org.apache.ignite.internal.processors.cache.CacheObject) 40
GridCacheEntryEx (org.apache.ignite.internal.processors.cache.GridCacheEntryEx) 40
GridCacheVersion (org.apache.ignite.internal.processors.cache.version.GridCacheVersion) 40
ClusterNode (org.apache.ignite.cluster.ClusterNode) 19
GridCacheContext (org.apache.ignite.internal.processors.cache.GridCacheContext) 18
IgniteTxEntry (org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry) 18
GridDhtInvalidPartitionException (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException) 17
ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException) 16
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) 16
ArrayList (java.util.ArrayList) 14
EntryGetResult (org.apache.ignite.internal.processors.cache.EntryGetResult) 14
GridCacheOperation (org.apache.ignite.internal.processors.cache.GridCacheOperation) 13
GridDhtCacheEntry (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry) 12
IgniteTxKey (org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey) 12
Map (java.util.Map) 10
GridDistributedCacheEntry (org.apache.ignite.internal.processors.cache.distributed.GridDistributedCacheEntry) 10
GridTimeoutObject (org.apache.ignite.internal.processors.timeout.GridTimeoutObject) 10