Example 41 with GridDhtPartitionTopology

Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.

The class IdleVerifyUtility, method getUpdateCountersSnapshot.

/**
 * Gathers update counter info.
 * Holds a {@link org.apache.ignite.internal.processors.cache.PartitionUpdateCounter#copy} of each update counter.
 *
 * @param ign Ignite instance.
 * @param grpIds Group IDs.
 * @return Current group distribution with update counters per partition.
 */
public static Map<Integer, Map<Integer, PartitionUpdateCounter>> getUpdateCountersSnapshot(IgniteEx ign, Set<Integer> grpIds) {
    Map<Integer, Map<Integer, PartitionUpdateCounter>> partsWithCountersPerGrp = new HashMap<>();
    for (Integer grpId : grpIds) {
        CacheGroupContext grpCtx = ign.context().cache().cacheGroup(grpId);
        if (grpCtx == null)
            throw new GridNotIdleException("Group not found: " + grpId + "." + " Possible reasons: rebalance in progress or concurrent cache destroy.");
        GridDhtPartitionTopology top = grpCtx.topology();
        Map<Integer, PartitionUpdateCounter> partsWithCounters = partsWithCountersPerGrp.computeIfAbsent(grpId, k -> new HashMap<>());
        for (GridDhtLocalPartition part : top.currentLocalPartitions()) {
            if (part.state() != GridDhtPartitionState.OWNING)
                continue;
            @Nullable PartitionUpdateCounter updCntr = part.dataStore().partUpdateCounter();
            partsWithCounters.put(part.id(), updCntr == null ? null : updCntr.copy());
        }
    }
    return partsWithCountersPerGrp;
}
Also used: HashMap (java.util.HashMap), GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology), PartitionUpdateCounter (org.apache.ignite.internal.processors.cache.PartitionUpdateCounter), GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition), CacheGroupContext (org.apache.ignite.internal.processors.cache.CacheGroupContext), Map (java.util.Map), Nullable (org.jetbrains.annotations.Nullable)
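
A minimal usage sketch for the snapshot above, assuming an IgniteEx instance named ignite and a precomputed set of group ids grpIds (both hypothetical names); it takes a snapshot and logs every partition counter, which is a typical first step when checking that a cluster stayed idle between two points in time:

// Hedged sketch: `ignite` and `grpIds` are assumed to exist in the surrounding test.
Map<Integer, Map<Integer, PartitionUpdateCounter>> snap =
    IdleVerifyUtility.getUpdateCountersSnapshot(ignite, grpIds);

// Walk the per-group, per-partition counters; a second snapshot taken later
// can be compared against this one to detect concurrent updates.
snap.forEach((grpId, parts) ->
    parts.forEach((partId, cntr) ->
        System.out.println("grp=" + grpId + ", part=" + partId + ", cntr=" + cntr)));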

Example 42 with GridDhtPartitionTopology

Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.

The class GridCacheAdapter, method localPreloadPartition.

/**
 * {@inheritDoc}
 */
@Override
public boolean localPreloadPartition(int part) throws IgniteCheckedException {
    if (!ctx.affinityNode())
        return false;
    GridDhtPartitionTopology top = ctx.group().topology();
    @Nullable GridDhtLocalPartition p = top.localPartition(part, top.readyTopologyVersion(), false);
    if (p == null)
        return false;
    try {
        if (!p.reserve() || p.state() != OWNING)
            return false;
        p.dataStore().preload();
    } finally {
        p.release();
    }
    return true;
}
Also used: GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology), GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition), Nullable (org.jetbrains.annotations.Nullable)
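
The internal method above backs the public IgniteCache#localPreloadPartition(int) API available in recent Ignite versions. A minimal sketch of the public-facing call, assuming a started node ignite and a persistent cache named "myCache" (both hypothetical): it warms every locally owned partition from disk into page memory.

IgniteCache<Integer, String> cache = ignite.cache("myCache"); // hypothetical cache name

int parts = ignite.affinity("myCache").partitions();

for (int p = 0; p < parts; p++) {
    // Returns false when this node is not an affinity node for the partition
    // or the partition is not in OWNING state, mirroring the checks above.
    boolean preloaded = cache.localPreloadPartition(p);
}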

Example 43 with GridDhtPartitionTopology

Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.

The class IgniteTxHandler, method prepareNearTx.

/**
 * @param originTx Origin transaction to copy.
 * @param nearNode Node that initiated the transaction.
 * @param req Near prepare request.
 * @return Prepare future, or {@code null} if the operation needs to be retried.
 */
@Nullable
private IgniteInternalFuture<GridNearTxPrepareResponse> prepareNearTx(final GridNearTxLocal originTx, final ClusterNode nearNode, final GridNearTxPrepareRequest req) {
    IgniteTxEntry firstEntry;
    try {
        IgniteTxEntry firstWrite = unmarshal(req.writes());
        IgniteTxEntry firstRead = unmarshal(req.reads());
        firstEntry = firstWrite != null ? firstWrite : firstRead;
    } catch (IgniteCheckedException e) {
        return new GridFinishedFuture<>(e);
    }
    GridDhtTxLocal tx = null;
    GridCacheVersion mappedVer = ctx.tm().mappedVersion(req.version());
    if (mappedVer != null) {
        tx = ctx.tm().tx(mappedVer);
        if (tx == null)
            U.warn(log, "Missing local transaction for mapped near version [nearVer=" + req.version() + ", mappedVer=" + mappedVer + ']');
        else {
            if (req.concurrency() == PESSIMISTIC)
                tx.nearFutureId(req.futureId());
        }
    } else {
        GridDhtPartitionTopology top = null;
        if (req.firstClientRequest()) {
            assert firstEntry != null : req;
            assert req.concurrency() == OPTIMISTIC : req;
            assert nearNode.isClient() : nearNode;
            top = firstEntry.context().topology();
            top.readLock();
            if (req.allowWaitTopologyFuture()) {
                GridDhtTopologyFuture topFut = top.topologyVersionFuture();
                if (!topFut.isDone()) {
                    top.readUnlock();
                    return null;
                }
            }
        }
        try {
            if (top != null) {
                boolean retry = false;
                GridDhtTopologyFuture topFut = top.topologyVersionFuture();
                if (!req.allowWaitTopologyFuture() && !topFut.isDone()) {
                    retry = true;
                    if (txPrepareMsgLog.isDebugEnabled()) {
                        txPrepareMsgLog.debug("Topology change is in progress, need remap transaction [" + "txId=" + req.version() + ", node=" + nearNode.id() + ", reqTopVer=" + req.topologyVersion() + ", locTopVer=" + top.readyTopologyVersion() + ", req=" + req + ']');
                    }
                }
                if (!retry && needRemap(req.topologyVersion(), top.readyTopologyVersion(), req)) {
                    retry = true;
                    if (txPrepareMsgLog.isDebugEnabled()) {
                        txPrepareMsgLog.debug("Topology version mismatch for near prepare, need remap transaction [" + "txId=" + req.version() + ", node=" + nearNode.id() + ", reqTopVer=" + req.topologyVersion() + ", locTopVer=" + top.readyTopologyVersion() + ", req=" + req + ']');
                    }
                }
                if (retry) {
                    GridNearTxPrepareResponse res = new GridNearTxPrepareResponse(req.partition(), req.version(), req.futureId(), req.miniId(), req.version(), req.version(), null, null, top.lastTopologyChangeVersion(), req.onePhaseCommit(), req.deployInfo() != null);
                    try {
                        ctx.io().send(nearNode, res, req.policy());
                        if (txPrepareMsgLog.isDebugEnabled()) {
                            txPrepareMsgLog.debug("Sent remap response for near prepare [txId=" + req.version() + ", node=" + nearNode.id() + ']');
                        }
                    } catch (ClusterTopologyCheckedException ignored) {
                        if (txPrepareMsgLog.isDebugEnabled()) {
                            txPrepareMsgLog.debug("Failed to send remap response for near prepare, node failed [" + "txId=" + req.version() + ", node=" + nearNode.id() + ']');
                        }
                    } catch (IgniteCheckedException e) {
                        U.error(txPrepareMsgLog, "Failed to send remap response for near prepare " + "[txId=" + req.version() + ", node=" + nearNode.id() + ", req=" + req + ']', e);
                    }
                    return new GridFinishedFuture<>(res);
                }
                assert topFut.isDone();
            }
            tx = new GridDhtTxLocal(ctx, req.topologyVersion(), nearNode.id(), req.version(), req.futureId(), req.miniId(), req.threadId(), req.implicitSingle(), req.implicitSingle(), req.system(), req.explicitLock(), req.policy(), req.concurrency(), req.isolation(), req.timeout(), req.isInvalidate(), true, req.onePhaseCommit(), req.txSize(), req.transactionNodes(), securitySubjectId(ctx), req.taskNameHash(), req.txLabel(), originTx);
            tx = ctx.tm().onCreated(null, tx);
            if (tx != null)
                tx.topologyVersion(req.topologyVersion());
            else
                U.warn(log, "Failed to create local transaction (was transaction rolled back?) [xid=" + req.version() + ", req=" + req + ']');
        } finally {
            if (tx != null)
                req.txState(tx.txState());
            if (top != null)
                top.readUnlock();
        }
    }
    if (tx != null) {
        req.txState(tx.txState());
        if (req.explicitLock())
            tx.explicitLock(true);
        tx.transactionNodes(req.transactionNodes());
        if (req.near())
            tx.nearOnOriginatingNode(true);
        if (req.onePhaseCommit()) {
            assert req.last() : req;
            tx.onePhaseCommit(true);
        }
        if (req.needReturnValue())
            tx.needReturnValue(true);
        IgniteInternalFuture<GridNearTxPrepareResponse> fut = tx.prepareAsync(req);
        if (tx.isRollbackOnly() && !tx.commitOnPrepare()) {
            if (tx.state() != TransactionState.ROLLED_BACK && tx.state() != TransactionState.ROLLING_BACK)
                tx.rollbackDhtLocalAsync();
        }
        final GridDhtTxLocal tx0 = tx;
        fut.listen(new CI1<IgniteInternalFuture<?>>() {

            @Override
            public void apply(IgniteInternalFuture<?> txFut) {
                try {
                    txFut.get();
                } catch (IgniteCheckedException e) {
                    // Just in case.
                    tx0.setRollbackOnly();
                    if (!X.hasCause(e, IgniteTxOptimisticCheckedException.class) && !X.hasCause(e, IgniteFutureCancelledException.class) && !ctx.kernalContext().isStopping())
                        U.error(log, "Failed to prepare DHT transaction: " + tx0, e);
                }
            }
        });
        return fut;
    } else
        return new GridFinishedFuture<>((GridNearTxPrepareResponse) null);
}
Also used: GridDhtTxLocal (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxLocal), GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology), IgniteInternalFuture (org.apache.ignite.internal.IgniteInternalFuture), IgniteTxOptimisticCheckedException (org.apache.ignite.internal.transactions.IgniteTxOptimisticCheckedException), GridFinishedFuture (org.apache.ignite.internal.util.future.GridFinishedFuture), GridCacheVersion (org.apache.ignite.internal.processors.cache.version.GridCacheVersion), GridDhtTopologyFuture (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), GridNearTxPrepareResponse (org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxPrepareResponse), IgniteFutureCancelledException (org.apache.ignite.lang.IgniteFutureCancelledException), ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException), Nullable (org.jetbrains.annotations.Nullable)
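
The first-client-request branch above follows a recurring Ignite pattern when touching GridDhtPartitionTopology: take the topology read lock, bail out (or remap) unless the topology version future is complete, and always release the lock. A condensed sketch of that pattern, assuming a cache context cctx is in scope:

GridDhtPartitionTopology top = cctx.topology(); // assumed cache context

top.readLock();

try {
    GridDhtTopologyFuture topFut = top.topologyVersionFuture();

    if (!topFut.isDone())
        return null; // Topology change in progress: the caller must retry or remap.

    AffinityTopologyVersion locTopVer = top.readyTopologyVersion();

    // locTopVer can now be compared with the requested version,
    // as needRemap(...) does in the method above.
} finally {
    top.readUnlock();
}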

Example 44 with GridDhtPartitionTopology

Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.

The class CacheAffinitySharedManager, method initAffinityOnNodeJoin.

/**
 * @param evts Discovery events processed during the exchange.
 * @param addedOnExchnage {@code True} if the cache group was added during this exchange.
 * @param grpHolder Group holder.
 * @param rebalanceInfo Rebalance information on the coordinator, or {@code null} on other nodes.
 * @param latePrimary If {@code true}, delays primary assignment when the new primary is not yet an owner.
 */
private void initAffinityOnNodeJoin(ExchangeDiscoveryEvents evts, boolean addedOnExchnage, CacheGroupHolder grpHolder, @Nullable WaitRebalanceInfo rebalanceInfo, boolean latePrimary) {
    GridAffinityAssignmentCache aff = grpHolder.affinity();
    if (addedOnExchnage) {
        if (!aff.lastVersion().equals(evts.topologyVersion()))
            calculateAndInit(evts, aff, evts.topologyVersion());
        return;
    }
    AffinityTopologyVersion affTopVer = aff.lastVersion();
    assert affTopVer.topologyVersion() > 0 : "Affinity is not initialized [grp=" + aff.cacheOrGroupName() + ", topVer=" + affTopVer + ", node=" + cctx.localNodeId() + ']';
    List<List<ClusterNode>> curAff = aff.assignments(affTopVer);
    assert aff.idealAssignment() != null : "Previous assignment is not available.";
    List<List<ClusterNode>> idealAssignment = aff.calculate(evts.topologyVersion(), evts, evts.discoveryCache()).assignment();
    List<List<ClusterNode>> newAssignment = null;
    if (latePrimary) {
        for (int p = 0; p < idealAssignment.size(); p++) {
            List<ClusterNode> newNodes = idealAssignment.get(p);
            List<ClusterNode> curNodes = curAff.get(p);
            ClusterNode curPrimary = !curNodes.isEmpty() ? curNodes.get(0) : null;
            ClusterNode newPrimary = !newNodes.isEmpty() ? newNodes.get(0) : null;
            if (curPrimary != null && newPrimary != null && !curPrimary.equals(newPrimary)) {
                assert cctx.discovery().node(evts.topologyVersion(), curPrimary.id()) != null : curPrimary;
                List<ClusterNode> nodes0 = latePrimaryAssignment(aff, p, curPrimary, newNodes, rebalanceInfo);
                if (newAssignment == null)
                    newAssignment = new ArrayList<>(idealAssignment);
                newAssignment.set(p, nodes0);
            }
            GridDhtPartitionTopology top = grpHolder.topology(evts.discoveryCache());
            if (rebalanceInfo != null) {
                List<ClusterNode> owners = top.owners(p, evts.topologyVersion());
                // A group with lost partitions never gets rebalanced so should not be added to waitInfo.
                if (!owners.isEmpty() && !owners.containsAll(idealAssignment.get(p)) && !top.lostPartitions().contains(p))
                    rebalanceInfo.add(aff.groupId(), p, newNodes);
            }
        }
    }
    if (newAssignment == null)
        newAssignment = idealAssignment;
    aff.initialize(evts.topologyVersion(), newAssignment);
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion), GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology), GridAffinityAssignmentCache (org.apache.ignite.internal.processors.affinity.GridAffinityAssignmentCache), ArrayList (java.util.ArrayList), GridLongList (org.apache.ignite.internal.util.GridLongList), List (java.util.List)
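
The rebalance-wait check is the part of the loop that actually queries GridDhtPartitionTopology. A condensed restatement of just that check, under the same assumptions as the method above (top, idealAssignment, evts, aff and rebalanceInfo in scope):

for (int p = 0; p < idealAssignment.size(); p++) {
    List<ClusterNode> owners = top.owners(p, evts.topologyVersion());

    // Skip empty owner sets and lost partitions: a group with lost partitions
    // never gets rebalanced, so it must not be added to the wait info.
    if (!owners.isEmpty() && !owners.containsAll(idealAssignment.get(p)) && !top.lostPartitions().contains(p))
        rebalanceInfo.add(aff.groupId(), p, idealAssignment.get(p));
}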

Example 45 with GridDhtPartitionTopology

Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.

The class GridDhtPartitionsExchangeFuture, method resetLostPartitions.

/**
 * @param cacheNames Cache names.
 */
private void resetLostPartitions(Collection<String> cacheNames) {
    assert !exchCtx.mergeExchanges();
    try {
        doInParallelUninterruptibly(U.availableThreadCount(cctx.kernalContext(), GridIoPolicy.SYSTEM_POOL, 2), cctx.kernalContext().pools().getSystemExecutorService(), cctx.affinity().caches().values(), desc -> {
            if (desc.cacheConfiguration().getCacheMode() == CacheMode.LOCAL)
                return null;
            if (cacheNames.contains(desc.cacheName())) {
                CacheGroupContext grp = cctx.cache().cacheGroup(desc.groupId());
                GridDhtPartitionTopology top = grp != null ? grp.topology() : cctx.exchange().clientTopology(desc.groupId(), events().discoveryCache());
                top.resetLostPartitions(initialVersion());
            }
            return null;
        });
    } catch (IgniteCheckedException e) {
        throw new IgniteException(e);
    }
}
Also used: IgniteCheckedException (org.apache.ignite.IgniteCheckedException), GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology), IgniteException (org.apache.ignite.IgniteException), CacheGroupContext (org.apache.ignite.internal.processors.cache.CacheGroupContext)
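
The exchange-future code above is the internal side of the public Ignite#resetLostPartitions(Collection) API. A minimal sketch of the operator-facing call, assuming a cache named "myCache" (hypothetical) whose lost data has been restored, e.g. because the failed nodes rejoined:

// Clear the LOST state of the cache's partitions so that reads and writes
// against the affected partitions succeed again. Assumes java.util.Collections
// is imported and `ignite` is a started Ignite instance.
ignite.resetLostPartitions(Collections.singleton("myCache"));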

Aggregations

GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology): 64 usages
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 24 usages
GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition): 21 usages
ClusterNode (org.apache.ignite.cluster.ClusterNode): 20 usages
Map (java.util.Map): 18 usages
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 18 usages
CacheGroupContext (org.apache.ignite.internal.processors.cache.CacheGroupContext): 17 usages
HashMap (java.util.HashMap): 15 usages
ArrayList (java.util.ArrayList): 14 usages
Ignite (org.apache.ignite.Ignite): 14 usages
GridCommonAbstractTest (org.apache.ignite.testframework.junits.common.GridCommonAbstractTest): 12 usages
Test (org.junit.Test): 12 usages
IgniteEx (org.apache.ignite.internal.IgniteEx): 11 usages
UUID (java.util.UUID): 10 usages
IgniteKernal (org.apache.ignite.internal.IgniteKernal): 10 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 9 usages
IgniteException (org.apache.ignite.IgniteException): 9 usages
GridCacheContext (org.apache.ignite.internal.processors.cache.GridCacheContext): 9 usages
GridDhtPartitionMap (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap): 9 usages
HashSet (java.util.HashSet): 8 usages