Example 1 with CacheGroupContext

Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.

From the class GridDhtPartitionsExchangeFuture, method assignPartitionsStates.

/**
 */
private void assignPartitionsStates() {
    for (Map.Entry<Integer, CacheGroupDescriptor> e : cctx.affinity().cacheGroups().entrySet()) {
        CacheGroupDescriptor grpDesc = e.getValue();
        if (grpDesc.config().getCacheMode() == CacheMode.LOCAL)
            continue;
        if (!CU.isPersistentCache(grpDesc.config(), cctx.gridConfig().getDataStorageConfiguration()))
            continue;
        CacheGroupContext grpCtx = cctx.cache().cacheGroup(e.getKey());
        GridDhtPartitionTopology top = grpCtx != null ? grpCtx.topology() : cctx.exchange().clientTopology(e.getKey(), events().discoveryCache());
        assignPartitionStates(top);
    }
}
Also used: GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology), CacheGroupDescriptor (org.apache.ignite.internal.processors.cache.CacheGroupDescriptor), CacheGroupContext (org.apache.ignite.internal.processors.cache.CacheGroupContext), Map (java.util.Map), LinkedHashMap (java.util.LinkedHashMap), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), ConcurrentMap (java.util.concurrent.ConcurrentMap)
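
For orientation, a minimal sketch (not part of the example above) of how the same cache groups can be enumerated from test or tooling code through IgniteEx. The ignite variable and the printed fields are assumptions; cacheGroups() is the call the example reaches internally via cctx.cache().

// Minimal sketch, assuming `ignite` references an already started node; the loop body is illustrative only.
IgniteEx igniteEx = (IgniteEx) ignite;
for (CacheGroupContext grp : igniteEx.context().cache().cacheGroups()) {
    if (grp.isLocal())
        continue; // skip LOCAL cache groups, mirroring the check in the example above
    System.out.println("Cache group: " + grp.cacheOrGroupName() + ", persistent: " + grp.persistenceEnabled());
}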

Example 2 with CacheGroupContext

Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.

From the class GridDhtPartitionsExchangeFuture, method distributedExchange.

/**
 * @throws IgniteCheckedException If failed.
 */
private void distributedExchange() throws IgniteCheckedException {
    assert crd != null;
    assert !cctx.kernalContext().clientNode();
    for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
        if (grp.isLocal())
            continue;
        cctx.exchange().exchangerBlockingSectionBegin();
        try {
            grp.preloader().onTopologyChanged(this);
        } finally {
            cctx.exchange().exchangerBlockingSectionEnd();
        }
    }
    timeBag.finishGlobalStage("Preloading notification");
    // Skipping wait on local join is available when all cluster nodes have the same protocol.
    boolean skipWaitOnLocalJoin = localJoinExchange() && cctx.exchange().latch().canSkipJoiningNodes(initialVersion());
    if (context().exchangeFreeSwitch() && isBaselineNodeFailed()) {
        // Currently MVCC does not support operations on partially switched cluster.
        if (cctx.kernalContext().coordinators().mvccEnabled())
            waitPartitionRelease(EXCHANGE_FREE_LATCH_ID, true, false);
        else
            waitPartitionRelease(null, false, false);
    } else if (!skipWaitOnLocalJoin) {
        // Skip partition release if node has locally joined (it doesn't have any updates to be finished).
        boolean distributed = true;
        // Do not perform distributed partition release in case of cluster activation.
        if (activateCluster())
            distributed = false;
        // On first phase we wait for finishing all local tx updates, atomic updates and lock releases on all nodes.
        waitPartitionRelease(EXCHANGE_LATCH_ID, distributed, true);
        // Second phase is needed to wait for finishing all tx updates from primary to backup nodes remaining after first phase.
        if (distributed)
            waitPartitionRelease(EXCHANGE_LATCH_ID, false, false);
    } else {
        if (log.isInfoEnabled())
            log.info("Skipped waiting for partitions release future (local node is joining) " + "[topVer=" + initialVersion() + "]");
    }
    boolean topChanged = firstDiscoEvt.type() != EVT_DISCOVERY_CUSTOM_EVT || affChangeMsg != null;
    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
        if (cacheCtx.isLocal() || cacheStopping(cacheCtx.cacheId()))
            continue;
        if (topChanged) {
            // Partition release future is done so we can flush the write-behind store.
            cctx.exchange().exchangerBlockingSectionBegin();
            try {
                cacheCtx.store().forceFlush();
            } finally {
                cctx.exchange().exchangerBlockingSectionEnd();
            }
        }
    }
    cctx.exchange().exchangerBlockingSectionBegin();
    try {
        /* It is necessary to run the database callback before all topology callbacks.
           If the persistent store is enabled, we first restore the partitions present on disk.
           We need to guarantee that no partition state changes are logged to WAL before this callback
           to make sure that we correctly restored the last actual states. */
        cctx.database().beforeExchange(this);
    } finally {
        cctx.exchange().exchangerBlockingSectionEnd();
    }
    // Pre-create missing partitions using current affinity.
    if (!exchCtx.mergeExchanges() && !exchCtx.exchangeFreeSwitch()) {
        for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
            if (grp.isLocal() || cacheGroupStopping(grp.groupId()))
                continue;
            // Affinity may not be initialized yet if the node is just joining the cluster.
            if (grp.affinity().lastVersion().topologyVersion() > 0) {
                cctx.exchange().exchangerBlockingSectionBegin();
                try {
                    grp.topology().beforeExchange(this, !centralizedAff && !forceAffReassignment, false);
                } finally {
                    cctx.exchange().exchangerBlockingSectionEnd();
                }
            }
        }
    }
    // After all partitions have been restored and pre-created it's safe to make first checkpoint.
    if (localJoinExchange() || activateCluster()) {
        cctx.exchange().exchangerBlockingSectionBegin();
        try {
            cctx.database().onStateRestored(initialVersion());
        } finally {
            cctx.exchange().exchangerBlockingSectionEnd();
        }
    }
    timeBag.finishGlobalStage("After states restored callback");
    cctx.exchange().exchangerBlockingSectionBegin();
    try {
        cctx.database().releaseHistoryForPreloading();
        // To correctly rebalance when persistence is enabled, it is necessary to reserve history within exchange.
        partHistReserved = cctx.database().reserveHistoryForExchange();
    } finally {
        cctx.exchange().exchangerBlockingSectionEnd();
    }
    clearingPartitions = new HashMap();
    timeBag.finishGlobalStage("WAL history reservation");
    changeWalModeIfNeeded();
    if (events().hasServerLeft())
        finalizePartitionCounters();
    cctx.exchange().exchangerBlockingSectionBegin();
    try {
        if (context().exchangeFreeSwitch()) {
            // Update local maps, see CachePartitionLossWithRestartsTest.
            doInParallel(U.availableThreadCount(cctx.kernalContext(), GridIoPolicy.SYSTEM_POOL, 2), cctx.kernalContext().pools().getSystemExecutorService(), cctx.affinity().cacheGroups().values(), desc -> {
                if (desc.config().getCacheMode() == CacheMode.LOCAL)
                    return null;
                CacheGroupContext grp = cctx.cache().cacheGroup(desc.groupId());
                GridDhtPartitionTopology top = grp != null ? grp.topology() : cctx.exchange().clientTopology(desc.groupId(), events().discoveryCache());
                // Not expecting new moving partitions.
                top.beforeExchange(this, true, false);
                return null;
            });
        } else {
            if (crd.isLocal()) {
                if (remaining.isEmpty()) {
                    initFut.onDone(true);
                    onAllReceived(null);
                }
            } else
                sendPartitions(crd);
            initDone();
        }
    } finally {
        cctx.exchange().exchangerBlockingSectionEnd();
    }
}
Also used: GridCacheContext (org.apache.ignite.internal.processors.cache.GridCacheContext), LinkedHashMap (java.util.LinkedHashMap), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology), CacheGroupContext (org.apache.ignite.internal.processors.cache.CacheGroupContext)
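
Every blocking step in distributedExchange() is bracketed by the same exchangerBlockingSectionBegin()/exchangerBlockingSectionEnd() try/finally pair. Below is a minimal sketch of a helper that would keep that discipline in one place, assuming the same cctx field; the helper itself is hypothetical and not part of GridDhtPartitionsExchangeFuture.

// Hypothetical helper: wraps an action in the exchanger blocking section so the
// end call can never be missed. Actions that throw checked exceptions (e.g.
// IgniteCheckedException) would need a throwing functional interface instead of Runnable.
private void runInBlockingSection(Runnable action) {
    cctx.exchange().exchangerBlockingSectionBegin();
    try {
        action.run();
    } finally {
        cctx.exchange().exchangerBlockingSectionEnd();
    }
}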

Example 3 with CacheGroupContext

Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.

From the class GridDhtPartitionsExchangeFuture, method processFullMessage.

/**
 * @param checkCrd If {@code true} checks that local node is exchange coordinator.
 * @param node Sender node.
 * @param msg Message.
 */
private void processFullMessage(boolean checkCrd, ClusterNode node, GridDhtPartitionsFullMessage msg) {
    try {
        assert exchId.equals(msg.exchangeId()) : msg;
        assert msg.lastVersion() != null : msg;
        timeBag.finishGlobalStage("Waiting for Full message");
        if (checkCrd) {
            assert node != null;
            synchronized (mux) {
                if (crd == null) {
                    if (log.isInfoEnabled())
                        log.info("Ignore full message, all server nodes left: " + msg);
                    return;
                }
                switch(state) {
                    case CRD:
                    case BECOME_CRD:
                        {
                            if (log.isInfoEnabled())
                                log.info("Ignore full message, node is coordinator: " + msg);
                            return;
                        }
                    case DONE:
                        {
                            if (log.isInfoEnabled())
                                log.info("Ignore full message, future is done: " + msg);
                            return;
                        }
                    case SRV:
                    case CLIENT:
                        {
                            if (!crd.equals(node)) {
                                if (log.isInfoEnabled()) {
                                    log.info("Received full message from non-coordinator [node=" + node.id() + ", nodeOrder=" + node.order() + ", crd=" + crd.id() + ", crdOrder=" + crd.order() + ']');
                                }
                                if (node.order() > crd.order())
                                    fullMsgs.put(node, msg);
                                return;
                            } else {
                                if (!F.isEmpty(msg.getErrorsMap())) {
                                    Exception e = msg.getErrorsMap().get(cctx.localNodeId());
                                    if (e instanceof IgniteNeedReconnectException) {
                                        onDone(e);
                                        return;
                                    }
                                }
                                AffinityTopologyVersion resVer = msg.resultTopologyVersion() != null ? msg.resultTopologyVersion() : initialVersion();
                                if (log.isInfoEnabled()) {
                                    log.info("Received full message, will finish exchange [node=" + node.id() + ", resVer=" + resVer + ']');
                                }
                                finishState = new FinishState(crd.id(), resVer, msg);
                                state = ExchangeLocalState.DONE;
                                break;
                            }
                        }
                }
            }
        } else
            assert node == null : node;
        AffinityTopologyVersion resTopVer = initialVersion();
        if (exchCtx.mergeExchanges()) {
            if (msg.resultTopologyVersion() != null && !initialVersion().equals(msg.resultTopologyVersion())) {
                if (log.isInfoEnabled()) {
                    log.info("Received full message, need merge [curFut=" + initialVersion() + ", resVer=" + msg.resultTopologyVersion() + ']');
                }
                resTopVer = msg.resultTopologyVersion();
                if (cctx.exchange().mergeExchanges(this, msg)) {
                    assert cctx.kernalContext().isStopping() || cctx.kernalContext().clientDisconnected();
                    // Node is stopping, no need to further process exchange.
                    return;
                }
                assert resTopVer.equals(exchCtx.events().topologyVersion()) : "Unexpected result version [" + "msgVer=" + resTopVer + ", locVer=" + exchCtx.events().topologyVersion() + ']';
            }
            exchCtx.events().processEvents(this);
            if (localJoinExchange()) {
                Set<Integer> noAffinityGroups = cctx.affinity().onLocalJoin(this, msg.joinedNodeAffinity(), resTopVer);
                // Prevent cache usage by a user.
                if (!noAffinityGroups.isEmpty()) {
                    List<GridCacheAdapter> closedCaches = cctx.cache().blockGateways(noAffinityGroups);
                    closedCaches.forEach(cache -> log.warning("Affinity for cache " + cache.context().name() + " has not received from coordinator during local join. " + " Probably cache is already stopped but not processed on local node yet." + " Cache proxy will be closed for user interactions for safety."));
                }
            } else {
                if (exchCtx.events().hasServerLeft())
                    cctx.affinity().applyAffinityFromFullMessage(this, msg.idealAffinityDiff());
                else
                    cctx.affinity().onServerJoinWithExchangeMergeProtocol(this, false);
                for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
                    if (grp.isLocal() || cacheGroupStopping(grp.groupId()))
                        continue;
                    grp.topology().beforeExchange(this, true, false);
                }
            }
        } else if (localJoinExchange() && !exchCtx.fetchAffinityOnJoin())
            cctx.affinity().onLocalJoin(this, msg.joinedNodeAffinity(), resTopVer);
        else if (forceAffReassignment)
            cctx.affinity().applyAffinityFromFullMessage(this, msg.idealAffinityDiff());
        timeBag.finishGlobalStage("Affinity recalculation");
        if (dynamicCacheStartExchange() && !F.isEmpty(exchangeGlobalExceptions)) {
            assert cctx.localNode().isClient();
            // TODO: https://issues.apache.org/jira/browse/IGNITE-8796
            // The current exchange has been successfully completed on all server nodes,
            // but has failed on that client node for some reason.
            // It looks like that we need to rollback dynamically started caches on the client node,
            // complete DynamicCacheStartFutures (if they are registered) with the cause of that failure
            // and complete current exchange without errors.
            onDone(exchangeLocE);
            return;
        }
        updatePartitionFullMap(resTopVer, msg);
        if (msg.rebalanced())
            markRebalanced();
        if (stateChangeExchange() && !F.isEmpty(msg.getErrorsMap()))
            cctx.kernalContext().state().onStateChangeError(msg.getErrorsMap(), exchActions.stateChangeRequest());
        if (firstDiscoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT) {
            DiscoveryCustomMessage discoveryCustomMessage = ((DiscoveryCustomEvent) firstDiscoEvt).customMessage();
            if (discoveryCustomMessage instanceof SnapshotDiscoveryMessage && ((SnapshotDiscoveryMessage) discoveryCustomMessage).needAssignPartitions())
                markAffinityReassign();
        }
        onDone(resTopVer, null);
    } catch (IgniteCheckedException e) {
        onDone(e);
    }
}
Also used: AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion), DiscoveryCustomMessage (org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage), DiscoveryCustomEvent (org.apache.ignite.internal.events.DiscoveryCustomEvent), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), IgniteInterruptedCheckedException (org.apache.ignite.internal.IgniteInterruptedCheckedException), IOException (java.io.IOException), IgniteClientDisconnectedCheckedException (org.apache.ignite.internal.IgniteClientDisconnectedCheckedException), IgniteException (org.apache.ignite.IgniteException), IgniteNeedReconnectException (org.apache.ignite.internal.IgniteNeedReconnectException), IgniteFutureTimeoutCheckedException (org.apache.ignite.internal.IgniteFutureTimeoutCheckedException), ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException), GridCacheAdapter (org.apache.ignite.internal.processors.cache.GridCacheAdapter), SnapshotDiscoveryMessage (org.apache.ignite.internal.processors.cache.persistence.snapshot.SnapshotDiscoveryMessage), CacheGroupContext (org.apache.ignite.internal.processors.cache.CacheGroupContext)

Example 4 with CacheGroupContext

Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.

From the class GridDhtPartitionsExchangeFuture, method updatePartitionFullMap.

/**
 * Updates partition map in all caches.
 *
 * @param resTopVer Result topology version.
 * @param msg Partitions full messages.
 */
private void updatePartitionFullMap(AffinityTopologyVersion resTopVer, GridDhtPartitionsFullMessage msg) {
    cctx.versions().onExchange(msg.lastVersion().order());
    assert partHistSuppliers.isEmpty();
    partHistSuppliers.putAll(msg.partitionHistorySuppliers());
    // Reserve at least 2 threads for system operations.
    int parallelismLvl = U.availableThreadCount(cctx.kernalContext(), GridIoPolicy.SYSTEM_POOL, 2);
    try {
        Map<Integer, Map<Integer, Long>> partsSizes = msg.partitionSizes(cctx);
        doInParallel(parallelismLvl, cctx.kernalContext().pools().getSystemExecutorService(), msg.partitions().keySet(), grpId -> {
            CacheGroupContext grp = cctx.cache().cacheGroup(grpId);
            if (grp != null) {
                CachePartitionFullCountersMap cntrMap = msg.partitionUpdateCounters(grpId, grp.topology().partitions());
                grp.topology().update(resTopVer, msg.partitions().get(grpId), cntrMap, msg.partsToReload(cctx.localNodeId(), grpId), partsSizes.getOrDefault(grpId, Collections.emptyMap()), null, this, msg.lostPartitions(grpId));
            } else {
                GridDhtPartitionTopology top = cctx.exchange().clientTopology(grpId, events().discoveryCache());
                CachePartitionFullCountersMap cntrMap = msg.partitionUpdateCounters(grpId, top.partitions());
                top.update(resTopVer, msg.partitions().get(grpId), cntrMap, Collections.emptySet(), null, null, this, msg.lostPartitions(grpId));
            }
            return null;
        });
    } catch (IgniteCheckedException e) {
        throw new IgniteException(e);
    }
    timeBag.finishGlobalStage("Full map updating");
}
Also used: IgniteCheckedException (org.apache.ignite.IgniteCheckedException), GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology), IgniteException (org.apache.ignite.IgniteException), CacheGroupContext (org.apache.ignite.internal.processors.cache.CacheGroupContext), Map (java.util.Map), LinkedHashMap (java.util.LinkedHashMap), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), ConcurrentMap (java.util.concurrent.ConcurrentMap)
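
Examples 1, 2 and 4 all resolve a partition topology the same way: use the group's own topology when the CacheGroupContext is started locally, otherwise fall back to the client topology. Below is a minimal sketch of that lookup factored into one helper; the method name is an assumption, while the two calls are exactly the ones used above.

// Hypothetical helper, not part of the original future: resolves the partition topology
// for a group id, falling back to the client topology when the group is not started locally.
private GridDhtPartitionTopology topologyFor(int grpId) {
    CacheGroupContext grp = cctx.cache().cacheGroup(grpId);

    return grp != null
        ? grp.topology()
        : cctx.exchange().clientTopology(grpId, events().discoveryCache());
}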

Example 5 with CacheGroupContext

Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.

From the class GridDhtPartitionsExchangeFuture, method onAllServersLeft.

/**
 */
private void onAllServersLeft() {
    assert cctx.kernalContext().clientNode() : cctx.localNode();
    List<ClusterNode> empty = Collections.emptyList();
    for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
        List<List<ClusterNode>> affAssignment = new ArrayList<>(grp.affinity().partitions());
        for (int i = 0; i < grp.affinity().partitions(); i++)
            affAssignment.add(empty);
        grp.affinity().idealAssignment(initialVersion(), affAssignment);
        grp.affinity().initialize(initialVersion(), affAssignment);
        cctx.exchange().exchangerUpdateHeartbeat();
    }
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), ArrayList (java.util.ArrayList), List (java.util.List), CacheGroupContext (org.apache.ignite.internal.processors.cache.CacheGroupContext)

Aggregations

CacheGroupContext (org.apache.ignite.internal.processors.cache.CacheGroupContext): 103 usages
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 31 usages
IgniteEx (org.apache.ignite.internal.IgniteEx): 29 usages
Map (java.util.Map): 27 usages
HashMap (java.util.HashMap): 24 usages
IgniteException (org.apache.ignite.IgniteException): 22 usages
ArrayList (java.util.ArrayList): 21 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 20 usages
Test (org.junit.Test): 20 usages
GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition): 19 usages
List (java.util.List): 17 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 17 usages
GridCacheContext (org.apache.ignite.internal.processors.cache.GridCacheContext): 16 usages
GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology): 16 usages
HashSet (java.util.HashSet): 13 usages
IgniteInternalFuture (org.apache.ignite.internal.IgniteInternalFuture): 12 usages
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 12 usages
GridCacheSharedContext (org.apache.ignite.internal.processors.cache.GridCacheSharedContext): 12 usages
Set (java.util.Set): 11 usages
Collection (java.util.Collection): 10 usages