Example 6 with GridDhtPartitionTopology

Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology in project ignite by apache.

The class IgniteTxHandler, method prepareNearTx.

/**
 * @param nearNodeId Near node ID that initiated transaction.
 * @param req Near prepare request.
 * @param locReq Local request flag.
 * @return Prepare future.
 */
public IgniteInternalFuture<GridNearTxPrepareResponse> prepareNearTx(final UUID nearNodeId, final GridNearTxPrepareRequest req, boolean locReq) {
    // Make sure not to provide Near entries to DHT cache.
    if (locReq)
        req.cloneEntries();
    ClusterNode nearNode = ctx.node(nearNodeId);
    if (nearNode == null) {
        if (txPrepareMsgLog.isDebugEnabled()) {
            txPrepareMsgLog.debug("Received near prepare from node that left grid (will ignore) [" + "txId=" + req.version() + ", node=" + nearNodeId + ']');
        }
        return null;
    }
    IgniteTxEntry firstEntry;
    try {
        IgniteTxEntry firstWrite = unmarshal(req.writes());
        IgniteTxEntry firstRead = unmarshal(req.reads());
        firstEntry = firstWrite != null ? firstWrite : firstRead;
    } catch (IgniteCheckedException e) {
        return new GridFinishedFuture<>(e);
    }
    assert firstEntry != null : req;
    GridDhtTxLocal tx = null;
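    // Check whether this near version was already mapped to a DHT transaction on this node.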
    GridCacheVersion mappedVer = ctx.tm().mappedVersion(req.version());
    if (mappedVer != null) {
        tx = ctx.tm().tx(mappedVer);
        if (tx == null)
            U.warn(log, "Missing local transaction for mapped near version [nearVer=" + req.version() + ", mappedVer=" + mappedVer + ']');
        else {
            if (req.concurrency() == PESSIMISTIC)
                tx.nearFutureId(req.futureId());
        }
    } else {
        GridDhtPartitionTopology top = null;
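        // For a client's first request, take the topology read lock so the remap check below sees a stable version.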
        if (req.firstClientRequest()) {
            assert req.concurrency() == OPTIMISTIC : req;
            assert CU.clientNode(nearNode) : nearNode;
            top = firstEntry.context().topology();
            top.readLock();
        }
        try {
            if (top != null && needRemap(req.topologyVersion(), top.topologyVersion(), req)) {
                if (txPrepareMsgLog.isDebugEnabled()) {
                    txPrepareMsgLog.debug("Topology version mismatch for near prepare, need remap transaction [" + "txId=" + req.version() + ", node=" + nearNodeId + ", reqTopVer=" + req.topologyVersion() + ", locTopVer=" + top.topologyVersion() + ", req=" + req + ']');
                }
                GridNearTxPrepareResponse res = new GridNearTxPrepareResponse(req.partition(), req.version(), req.futureId(), req.miniId(), req.version(), req.version(), null, null, top.topologyVersion(), req.deployInfo() != null);
                try {
                    ctx.io().send(nearNodeId, res, req.policy());
                    if (txPrepareMsgLog.isDebugEnabled()) {
                        txPrepareMsgLog.debug("Sent remap response for near prepare [txId=" + req.version() + ", node=" + nearNodeId + ']');
                    }
                } catch (ClusterTopologyCheckedException ignored) {
                    if (txPrepareMsgLog.isDebugEnabled()) {
                        txPrepareMsgLog.debug("Failed to send remap response for near prepare, node failed [" + "txId=" + req.version() + ", node=" + nearNodeId + ']');
                    }
                } catch (IgniteCheckedException e) {
                    U.error(txPrepareMsgLog, "Failed to send remap response for near prepare " + "[txId=" + req.version() + ", node=" + nearNodeId + ", req=" + req + ']', e);
                }
                return new GridFinishedFuture<>(res);
            }
            tx = new GridDhtTxLocal(ctx, req.topologyVersion(), nearNode.id(), req.version(), req.futureId(), req.miniId(), req.threadId(), req.implicitSingle(), req.implicitSingle(), req.system(), req.explicitLock(), req.policy(), req.concurrency(), req.isolation(), req.timeout(), req.isInvalidate(), true, req.onePhaseCommit(), req.txSize(), req.transactionNodes(), req.subjectId(), req.taskNameHash());
            tx = ctx.tm().onCreated(null, tx);
            if (tx != null)
                tx.topologyVersion(req.topologyVersion());
            else
                U.warn(log, "Failed to create local transaction (was transaction rolled back?) [xid=" + req.version() + ", req=" + req + ']');
        } finally {
            if (tx != null)
                req.txState(tx.txState());
            if (top != null)
                top.readUnlock();
        }
    }
    if (tx != null) {
        req.txState(tx.txState());
        if (req.explicitLock())
            tx.explicitLock(true);
        tx.transactionNodes(req.transactionNodes());
        if (req.near())
            tx.nearOnOriginatingNode(true);
        if (req.onePhaseCommit()) {
            assert req.last() : req;
            tx.onePhaseCommit(true);
        }
        if (req.needReturnValue())
            tx.needReturnValue(true);
        IgniteInternalFuture<GridNearTxPrepareResponse> fut = tx.prepareAsync(req.reads(), req.writes(), req.dhtVersions(), req.messageId(), req.miniId(), req.transactionNodes(), req.last());
        if (tx.isRollbackOnly() && !tx.commitOnPrepare()) {
            if (tx.state() != TransactionState.ROLLED_BACK && tx.state() != TransactionState.ROLLING_BACK)
                tx.rollbackDhtLocalAsync();
        }
        final GridDhtTxLocal tx0 = tx;
        fut.listen(new CI1<IgniteInternalFuture<?>>() {
            @Override
            public void apply(IgniteInternalFuture<?> txFut) {
                try {
                    txFut.get();
                } catch (IgniteCheckedException e) {
                    // Just in case.
                    tx0.setRollbackOnly();
                    if (!X.hasCause(e, IgniteTxOptimisticCheckedException.class) && !X.hasCause(e, IgniteFutureCancelledException.class) && !ctx.kernalContext().isStopping())
                        U.error(log, "Failed to prepare DHT transaction: " + tx0, e);
                }
            }
        });
        return fut;
    } else
        return new GridFinishedFuture<>((GridNearTxPrepareResponse) null);
}
Also used: ClusterNode(org.apache.ignite.cluster.ClusterNode) GridDhtTxLocal(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxLocal) GridDhtPartitionTopology(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology) IgniteInternalFuture(org.apache.ignite.internal.IgniteInternalFuture) IgniteTxOptimisticCheckedException(org.apache.ignite.internal.transactions.IgniteTxOptimisticCheckedException) GridFinishedFuture(org.apache.ignite.internal.util.future.GridFinishedFuture) GridCacheVersion(org.apache.ignite.internal.processors.cache.version.GridCacheVersion) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) GridNearTxPrepareResponse(org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxPrepareResponse) IgniteFutureCancelledException(org.apache.ignite.lang.IgniteFutureCancelledException) ClusterTopologyCheckedException(org.apache.ignite.internal.cluster.ClusterTopologyCheckedException)
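
The detail worth calling out in this example is the topology read lock taken for a client's first request: the remap check runs under it, so the local topology version cannot advance between the comparison and the decision. Below is a minimal sketch of that check against a GridDhtPartitionTopology; the helper name staleTopology is ours, and the real needRemap(...) also consults the request itself rather than only comparing versions.

import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology;

final class RemapCheckSketch {
    /**
     * Compares the version the client mapped the transaction on against the
     * local topology version while holding the read lock, which keeps the
     * local version stable for the duration of the comparison. (The method
     * above holds the lock longer, across transaction creation as well.)
     */
    static boolean staleTopology(GridDhtPartitionTopology top, AffinityTopologyVersion reqTopVer) {
        top.readLock();

        try {
            return reqTopVer.compareTo(top.topologyVersion()) < 0;
        }
        finally {
            top.readUnlock();
        }
    }
}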

Example 7 with GridDhtPartitionTopology

Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology in project ignite by apache.

The class GridDhtPartitionsExchangeFuture, method updatePartitionSingleMap.

/**
 * Updates partition map in all caches.
 *
 * @param node Node that sent the single message.
 * @param msg Partitions single message.
 */
private void updatePartitionSingleMap(ClusterNode node, GridDhtPartitionsSingleMessage msg) {
    msgs.put(node.id(), msg);
    for (Map.Entry<Integer, GridDhtPartitionMap> entry : msg.partitions().entrySet()) {
        Integer cacheId = entry.getKey();
        GridCacheContext cacheCtx = cctx.cacheContext(cacheId);
        GridDhtPartitionTopology top = cacheCtx != null ? cacheCtx.topology() : cctx.exchange().clientTopology(cacheId, this);
        top.update(exchId, entry.getValue());
    }
}
Also used: GridCacheContext(org.apache.ignite.internal.processors.cache.GridCacheContext) GridDhtPartitionTopology(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology) Map(java.util.Map) HashMap(java.util.HashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap)
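
The ternary in the loop body is the recurring lookup step for GridDhtPartitionTopology: a cache started locally exposes its topology through the cache context, while a cache this node does not host falls back to the client-side copy kept by the exchange manager. The same step pulled into a helper, as a sketch: the name topologyFor is ours, and the import paths follow the Ignite source tree these snippets come from.

import org.apache.ignite.internal.processors.cache.GridCacheContext;
import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology;
import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture;

final class TopologyResolveSketch {
    /**
     * Resolves the partition topology for a cache id: the cache context owns
     * it when the cache is started locally, otherwise the exchange manager
     * tracks a client-side topology so partition maps can still be updated.
     */
    static GridDhtPartitionTopology topologyFor(GridCacheSharedContext<?, ?> cctx, int cacheId,
        GridDhtPartitionsExchangeFuture fut) {
        GridCacheContext<?, ?> cacheCtx = cctx.cacheContext(cacheId);

        return cacheCtx != null ? cacheCtx.topology() : cctx.exchange().clientTopology(cacheId, fut);
    }
}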

Example 8 with GridDhtPartitionTopology

Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology in project ignite by apache.

The class GridDhtPartitionsExchangeFuture, method onAllReceived.

/**
 *
 */
private void onAllReceived() {
    try {
        assert crd.isLocal();
        if (!crd.equals(discoCache.serverNodes().get(0))) {
            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (!cacheCtx.isLocal())
                    cacheCtx.topology().beforeExchange(this, !centralizedAff);
            }
        }
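        // Apply partition update counters collected from the nodes' single messages.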
        for (GridDhtPartitionsAbstractMessage msg : msgs.values()) {
            if (msg instanceof GridDhtPartitionsSingleMessage) {
                GridDhtPartitionsSingleMessage msg0 = (GridDhtPartitionsSingleMessage) msg;
                for (Map.Entry<Integer, GridDhtPartitionMap> entry : msg0.partitions().entrySet()) {
                    Integer cacheId = entry.getKey();
                    GridCacheContext cacheCtx = cctx.cacheContext(cacheId);
                    GridDhtPartitionTopology top = cacheCtx != null ? cacheCtx.topology() : cctx.exchange().clientTopology(cacheId, this);
                    Map<Integer, T2<Long, Long>> cntrs = msg0.partitionUpdateCounters(cacheId);
                    if (cntrs != null)
                        top.applyUpdateCounters(cntrs);
                }
            }
        }
        if (discoEvt.type() == EVT_NODE_JOINED) {
            if (cctx.kernalContext().state().active())
                assignPartitionsStates();
        } else if (discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT) {
            assert discoEvt instanceof DiscoveryCustomEvent;
            if (((DiscoveryCustomEvent) discoEvt).customMessage() instanceof DynamicCacheChangeBatch) {
                if (exchActions != null) {
                    if (exchActions.newClusterState() == ClusterState.ACTIVE)
                        assignPartitionsStates();
                    Set<String> caches = exchActions.cachesToResetLostPartitions();
                    if (!F.isEmpty(caches))
                        resetLostPartitions(caches);
                }
            }
        } else if (discoEvt.type() == EVT_NODE_LEFT || discoEvt.type() == EVT_NODE_FAILED)
            detectLostPartitions();
        updateLastVersion(cctx.versions().last());
        cctx.versions().onExchange(lastVer.get().order());
        if (centralizedAff) {
            IgniteInternalFuture<Map<Integer, Map<Integer, List<UUID>>>> fut = cctx.affinity().initAffinityOnNodeLeft(this);
            if (!fut.isDone()) {
                fut.listen(new IgniteInClosure<IgniteInternalFuture<Map<Integer, Map<Integer, List<UUID>>>>>() {
                    @Override
                    public void apply(IgniteInternalFuture<Map<Integer, Map<Integer, List<UUID>>>> fut) {
                        onAffinityInitialized(fut);
                    }
                });
            } else
                onAffinityInitialized(fut);
        } else {
            List<ClusterNode> nodes;
            synchronized (this) {
                srvNodes.remove(cctx.localNode());
                nodes = new ArrayList<>(srvNodes);
            }
            if (!nodes.isEmpty())
                sendAllPartitions(nodes);
            if (exchangeOnChangeGlobalState && !F.isEmpty(changeGlobalStateExceptions))
                cctx.kernalContext().state().onFullResponseMessage(changeGlobalStateExceptions);
            onDone(exchangeId().topologyVersion());
        }
    } catch (IgniteCheckedException e) {
        if (reconnectOnError(e))
            onDone(new IgniteNeedReconnectException(cctx.localNode(), e));
        else
            onDone(e);
    }
}
Also used: ClusterNode(org.apache.ignite.cluster.ClusterNode) GridCacheContext(org.apache.ignite.internal.processors.cache.GridCacheContext) Set(java.util.Set) HashSet(java.util.HashSet) GridDhtPartitionTopology(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology) IgniteInternalFuture(org.apache.ignite.internal.IgniteInternalFuture) DiscoveryCustomEvent(org.apache.ignite.internal.events.DiscoveryCustomEvent) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) DynamicCacheChangeBatch(org.apache.ignite.internal.processors.cache.DynamicCacheChangeBatch) List(java.util.List) ArrayList(java.util.ArrayList) UUID(java.util.UUID) IgniteNeedReconnectException(org.apache.ignite.internal.IgniteNeedReconnectException) Map(java.util.Map) HashMap(java.util.HashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) T2(org.apache.ignite.internal.util.typedef.T2)
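
One small pattern from the centralized-affinity branch deserves a note: the affinity future is completed through a listener only when it is not already done, otherwise the callback runs synchronously. A generic sketch of that two-armed completion (the helper name whenDone is ours):

import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.lang.IgniteInClosure;

final class WhenDoneSketch {
    /**
     * Runs the callback immediately if the future has already completed,
     * otherwise registers it as a listener. The fast path skips the listener
     * machinery when the result is already available.
     */
    static <T> void whenDone(IgniteInternalFuture<T> fut, IgniteInClosure<IgniteInternalFuture<T>> cb) {
        if (fut.isDone())
            cb.apply(fut);
        else
            fut.listen(cb);
    }
}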

Example 9 with GridDhtPartitionTopology

Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology in project ignite by apache.

The class CacheAffinitySharedManager, method initAffinityOnNodeLeft0.

/**
 * @param fut Exchange future.
 * @return Affinity assignment.
 * @throws IgniteCheckedException If failed.
 */
private Map<Integer, Map<Integer, List<UUID>>> initAffinityOnNodeLeft0(final GridDhtPartitionsExchangeFuture fut) throws IgniteCheckedException {
    final AffinityTopologyVersion topVer = fut.topologyVersion();
    final WaitRebalanceInfo waitRebalanceInfo = new WaitRebalanceInfo(topVer);
    final Collection<ClusterNode> aliveNodes = cctx.discovery().nodes(topVer);
    final Map<Integer, Map<Integer, List<UUID>>> assignment = new HashMap<>();
    forAllRegisteredCaches(new IgniteInClosureX<DynamicCacheDescriptor>() {
        @Override
        public void applyx(DynamicCacheDescriptor cacheDesc) throws IgniteCheckedException {
            CacheHolder cache = cache(fut, cacheDesc);
            if (!cache.rebalanceEnabled)
                return;
            AffinityTopologyVersion affTopVer = cache.affinity().lastVersion();
            assert affTopVer.topologyVersion() > 0 && !affTopVer.equals(topVer) : "Invalid affinity version " + "[last=" + affTopVer + ", futVer=" + topVer + ", cache=" + cache.name() + ']';
            List<List<ClusterNode>> curAssignment = cache.affinity().assignments(affTopVer);
            List<List<ClusterNode>> newAssignment = cache.affinity().idealAssignment();
            assert newAssignment != null;
            GridDhtPartitionTopology top = cache.topology(fut);
            Map<Integer, List<UUID>> cacheAssignment = null;
            for (int p = 0; p < newAssignment.size(); p++) {
                List<ClusterNode> newNodes = newAssignment.get(p);
                List<ClusterNode> curNodes = curAssignment.get(p);
                ClusterNode curPrimary = curNodes.size() > 0 ? curNodes.get(0) : null;
                ClusterNode newPrimary = newNodes.size() > 0 ? newNodes.get(0) : null;
                List<ClusterNode> newNodes0 = null;
                assert newPrimary == null || aliveNodes.contains(newPrimary) : "Invalid new primary [" + "cache=" + cache.name() + ", node=" + newPrimary + ", topVer=" + topVer + ']';
                if (curPrimary != null && newPrimary != null && !curPrimary.equals(newPrimary)) {
                    if (aliveNodes.contains(curPrimary)) {
                        GridDhtPartitionState state = top.partitionState(newPrimary.id(), p);
                        if (state != GridDhtPartitionState.OWNING) {
                            newNodes0 = latePrimaryAssignment(cache.affinity(), p, curPrimary, newNodes, waitRebalanceInfo);
                        }
                    } else {
                        GridDhtPartitionState state = top.partitionState(newPrimary.id(), p);
                        if (state != GridDhtPartitionState.OWNING) {
                            for (int i = 1; i < curNodes.size(); i++) {
                                ClusterNode curNode = curNodes.get(i);
                                if (top.partitionState(curNode.id(), p) == GridDhtPartitionState.OWNING) {
                                    newNodes0 = latePrimaryAssignment(cache.affinity(), p, curNode, newNodes, waitRebalanceInfo);
                                    break;
                                }
                            }
                            if (newNodes0 == null) {
                                List<ClusterNode> owners = top.owners(p);
                                for (ClusterNode owner : owners) {
                                    if (aliveNodes.contains(owner)) {
                                        newNodes0 = latePrimaryAssignment(cache.affinity(), p, owner, newNodes, waitRebalanceInfo);
                                        break;
                                    }
                                }
                            }
                        }
                    }
                }
                if (newNodes0 != null) {
                    if (cacheAssignment == null)
                        cacheAssignment = new HashMap<>();
                    cacheAssignment.put(p, toIds0(newNodes0));
                }
            }
            if (cacheAssignment != null)
                assignment.put(cache.cacheId(), cacheAssignment);
        }
    });
    synchronized (mux) {
        assert affCalcVer.equals(topVer);
        this.waitInfo = !waitRebalanceInfo.empty() ? waitRebalanceInfo : null;
        WaitRebalanceInfo info = this.waitInfo;
        if (log.isDebugEnabled()) {
            log.debug("Computed new affinity after node left [topVer=" + topVer + ", waitCaches=" + (info != null ? cacheNames(info.waitCaches.keySet()) : null) + ']');
        }
    }
    return assignment;
}
Also used: ClusterNode(org.apache.ignite.cluster.ClusterNode) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) HashMap(java.util.HashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) GridDhtPartitionTopology(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) GridDhtPartitionState(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState) ArrayList(java.util.ArrayList) List(java.util.List) UUID(java.util.UUID) HashMap(java.util.HashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap)
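
Two topology queries drive the late-primary decisions above: partitionState(nodeId, p) asks whether one specific node owns the partition, and owners(p) is the last-resort scan over all current owners. The scan, isolated as a sketch (the helper name aliveOwner is ours):

import java.util.Collection;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology;

final class OwnerLookupSketch {
    /**
     * Returns the first node that currently owns partition p and is still
     * alive, or null if there is none. This mirrors the final fallback above,
     * used when the previous primary has left the cluster and the new primary
     * has not finished rebalancing the partition yet.
     */
    static ClusterNode aliveOwner(GridDhtPartitionTopology top, int p, Collection<ClusterNode> aliveNodes) {
        for (ClusterNode owner : top.owners(p)) {
            if (aliveNodes.contains(owner))
                return owner;
        }

        return null;
    }
}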

Example 10 with GridDhtPartitionTopology

Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology in project ignite by apache.

The class GridDhtAtomicCache, method updateAllAsyncInternal0.

/**
 * Executes local update after preloader fetched values.
 *
 * @param nodeId Node ID.
 * @param req Update request.
 * @param completionCb Completion callback.
 */
private void updateAllAsyncInternal0(UUID nodeId, GridNearAtomicAbstractUpdateRequest req, UpdateReplyClosure completionCb) {
    ClusterNode node = ctx.discovery().node(nodeId);
    if (node == null) {
        U.warn(msgLog, "Skip near update request, node originated update request left [" + "futId=" + req.futureId() + ", node=" + nodeId + ']');
        return;
    }
    GridNearAtomicUpdateResponse res = new GridNearAtomicUpdateResponse(ctx.cacheId(), nodeId, req.futureId(), req.partition(), false, ctx.deploymentEnabled());
    assert !req.returnValue() || (req.operation() == TRANSFORM || req.size() == 1);
    GridDhtAtomicAbstractUpdateFuture dhtFut = null;
    boolean remap = false;
    String taskName = ctx.kernalContext().task().resolveTaskName(req.taskNameHash());
    IgniteCacheExpiryPolicy expiry = null;
    try {
        // If batch store update is enabled, we need to lock all entries.
        // First, need to acquire locks on cache entries, then check filter.
        List<GridDhtCacheEntry> locked = null;
        Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted = null;
        try {
            GridDhtPartitionTopology top = topology();
            top.readLock();
            try {
                if (top.stopping()) {
                    res.addFailedKeys(req.keys(), new IgniteCheckedException("Failed to perform cache operation " + "(cache is stopped): " + name()));
                    completionCb.apply(req, res);
                    return;
                }
                // external transaction or explicit lock.
                if (req.topologyLocked() || !needRemap(req.topologyVersion(), top.topologyVersion())) {
                    ctx.shared().database().ensureFreeSpace(ctx.memoryPolicy());
                    locked = lockEntries(req, req.topologyVersion());
                    boolean hasNear = ctx.discovery().cacheNearNode(node, name());
                    // Assign next version for update inside entries lock.
                    GridCacheVersion ver = ctx.versions().next(top.topologyVersion());
                    if (hasNear)
                        res.nearVersion(ver);
                    if (msgLog.isDebugEnabled()) {
                        msgLog.debug("Assigned update version [futId=" + req.futureId() + ", writeVer=" + ver + ']');
                    }
                    assert ver != null : "Got null version for update request: " + req;
                    boolean sndPrevVal = !top.rebalanceFinished(req.topologyVersion());
                    dhtFut = createDhtFuture(ver, req);
                    expiry = expiryPolicy(req.expiry());
                    GridCacheReturn retVal = null;
                    // Several keys, write-through store is enabled and is not local
                    // (conflict resolver should be used for local store), and no DR.
                    if (req.size() > 1 && writeThrough() && !req.skipStore() && !ctx.store().isLocal() &&
                        !ctx.dr().receiveEnabled()) {
                        // This method can only be used when there are no replicated entries in the batch.
                        UpdateBatchResult updRes = updateWithBatch(node, hasNear, req, res, locked, ver, dhtFut, ctx.isDrEnabled(), taskName, expiry, sndPrevVal);
                        deleted = updRes.deleted();
                        dhtFut = updRes.dhtFuture();
                        if (req.operation() == TRANSFORM)
                            retVal = updRes.invokeResults();
                    } else {
                        UpdateSingleResult updRes = updateSingle(node, hasNear, req, res, locked, ver, dhtFut, ctx.isDrEnabled(), taskName, expiry, sndPrevVal);
                        retVal = updRes.returnValue();
                        deleted = updRes.deleted();
                        dhtFut = updRes.dhtFuture();
                    }
                    if (retVal == null)
                        retVal = new GridCacheReturn(ctx, node.isLocal(), true, null, true);
                    res.returnValue(retVal);
                    if (dhtFut != null) {
                        // To avoid deadlock, disable back-pressure for the sender data node.
                        if (req.writeSynchronizationMode() == PRIMARY_SYNC &&
                            !ctx.discovery().cacheAffinityNode(ctx.discovery().node(nodeId), ctx.name()) &&
                            !dhtFut.isDone()) {
                            final IgniteRunnable tracker = GridNioBackPressureControl.threadTracker();
                            if (tracker != null && tracker instanceof GridNioMessageTracker) {
                                ((GridNioMessageTracker) tracker).onMessageReceived();
                                dhtFut.listen(new IgniteInClosure<IgniteInternalFuture<Void>>() {
                                    @Override
                                    public void apply(IgniteInternalFuture<Void> fut) {
                                        ((GridNioMessageTracker) tracker).onMessageProcessed();
                                    }
                                });
                            }
                        }
                        ctx.mvcc().addAtomicFuture(dhtFut.id(), dhtFut);
                    }
                } else {
                    // Should remap all keys.
                    remap = true;
                    res.remapTopologyVersion(top.topologyVersion());
                }
            } finally {
                top.readUnlock();
            }
        } catch (GridCacheEntryRemovedException e) {
            assert false : "Entry should not become obsolete while holding lock.";
            e.printStackTrace();
        } finally {
            if (locked != null)
                unlockEntries(locked, req.topologyVersion());
            // Enqueue if necessary after locks release.
            if (deleted != null) {
                assert !deleted.isEmpty();
                assert ctx.deferredDelete() : this;
                for (IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion> e : deleted)
                    ctx.onDeferredDelete(e.get1(), e.get2());
            }
            // TODO fire events only after successful fsync
            if (ctx.shared().wal() != null)
                ctx.shared().wal().fsync(null);
        }
    } catch (GridDhtInvalidPartitionException ignore) {
        if (log.isDebugEnabled())
            log.debug("Caught invalid partition exception for cache entry (will remap update request): " + req);
        remap = true;
        res.remapTopologyVersion(ctx.topology().topologyVersion());
    } catch (Throwable e) {
        // At least RuntimeException can be thrown by the code above when GridCacheContext is cleaned and there is
        // an attempt to use cleaned resources.
        U.error(log, "Unexpected exception during cache update", e);
        res.addFailedKeys(req.keys(), e);
        completionCb.apply(req, res);
        if (e instanceof Error)
            throw (Error) e;
        return;
    }
    if (remap) {
        assert dhtFut == null;
        completionCb.apply(req, res);
    } else if (dhtFut != null)
        dhtFut.map(node, res.returnValue(), res, completionCb);
    if (req.writeSynchronizationMode() != FULL_ASYNC)
        req.cleanup(!node.isLocal());
    sendTtlUpdateRequest(expiry);
}
Also used: IgniteBiTuple(org.apache.ignite.lang.IgniteBiTuple) GridDhtPartitionTopology(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology) IgniteInternalFuture(org.apache.ignite.internal.IgniteInternalFuture) GridNioMessageTracker(org.apache.ignite.internal.util.nio.GridNioMessageTracker) IgniteRunnable(org.apache.ignite.lang.IgniteRunnable) GridCacheVersion(org.apache.ignite.internal.processors.cache.version.GridCacheVersion) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException) ClusterNode(org.apache.ignite.cluster.ClusterNode) GridDhtInvalidPartitionException(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException) GridCacheReturn(org.apache.ignite.internal.processors.cache.GridCacheReturn) GridDhtCacheEntry(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry) IgniteCacheExpiryPolicy(org.apache.ignite.internal.processors.cache.IgniteCacheExpiryPolicy)
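
The read-lock section at the top of the method is effectively a three-way gate: reject the update when the topology is stopping, remap when the caller did not lock the topology and the requested version is stale, and proceed otherwise. Distilled into a sketch; the Gate enum is ours, and the plain version comparison stands in for the more involved needRemap(...).

import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology;

final class AtomicUpdateGateSketch {
    /** Possible outcomes of the topology check guarding an atomic update. */
    enum Gate { PROCEED, REMAP, STOPPED }

    /**
     * Decides under the topology read lock whether an update may run on this
     * node. Holding the lock keeps both the stopping flag and the version
     * stable while the decision is made.
     */
    static Gate check(GridDhtPartitionTopology top, AffinityTopologyVersion reqTopVer, boolean topLocked) {
        top.readLock();

        try {
            if (top.stopping())
                return Gate.STOPPED;

            if (!topLocked && !reqTopVer.equals(top.topologyVersion()))
                return Gate.REMAP;

            return Gate.PROCEED;
        }
        finally {
            top.readUnlock();
        }
    }
}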

Aggregations

GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology): 13
ClusterNode (org.apache.ignite.cluster.ClusterNode): 8
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 7
Map (java.util.Map): 6
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 6
HashMap (java.util.HashMap): 5
IgniteInternalFuture (org.apache.ignite.internal.IgniteInternalFuture): 5
ConcurrentMap (java.util.concurrent.ConcurrentMap): 4
GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition): 4
GridCacheVersion (org.apache.ignite.internal.processors.cache.version.GridCacheVersion): 4
UUID (java.util.UUID): 3
ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException): 3
GridCacheContext (org.apache.ignite.internal.processors.cache.GridCacheContext): 3
ArrayList (java.util.ArrayList): 2
HashSet (java.util.HashSet): 2
List (java.util.List): 2
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 2
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2
Ignite (org.apache.ignite.Ignite): 2
IgniteKernal (org.apache.ignite.internal.IgniteKernal): 2