Search in sources:

Example 6 with CacheGroupContext

Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.

From the class GridDhtPartitionSupplyMessage, method finishUnmarshal:

/**
 * {@inheritDoc}
 */
@SuppressWarnings("ForLoopReplaceableByForEach")
@Override
public void finishUnmarshal(GridCacheSharedContext ctx, ClassLoader ldr) throws IgniteCheckedException {
    super.finishUnmarshal(ctx, ldr);
    // Resolve the cache group this supply message belongs to.
    CacheGroupContext grp = ctx.cache().cacheGroup(grpId);
    for (CacheEntryInfoCollection col : infos().values()) {
        List<GridCacheEntryInfo> entries = col.infos();
        // Indexed loop is deliberate (see the @SuppressWarnings above): it
        // avoids allocating an Iterator for every entry collection.
        for (int i = 0; i < entries.size(); i++)
            entries.get(i).unmarshal(grp.cacheObjectContext(), ldr);
    }
}
Also used: GridCacheEntryInfo (org.apache.ignite.internal.processors.cache.GridCacheEntryInfo), CacheEntryInfoCollection (org.apache.ignite.internal.processors.cache.CacheEntryInfoCollection), CacheGroupContext (org.apache.ignite.internal.processors.cache.CacheGroupContext)
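
The @SuppressWarnings("ForLoopReplaceableByForEach") annotation signals that the indexed loop is deliberate: unlike an enhanced for loop, it does not allocate an Iterator per collection, which matters on the rebalance hot path. Below is a minimal, self-contained sketch of the same pattern in plain Java; the Entry class is a hypothetical stand-in for GridCacheEntryInfo, not an Ignite type.

import java.util.ArrayList;
import java.util.List;

public class IndexedLoopDemo {
    // Hypothetical stand-in for GridCacheEntryInfo; unmarshal() just flips a flag.
    static class Entry {
        boolean unmarshalled;

        void unmarshal() {
            unmarshalled = true;
        }
    }

    public static void main(String[] args) {
        List<Entry> entries = new ArrayList<>();

        for (int i = 0; i < 3; i++)
            entries.add(new Entry());

        // Same shape as finishUnmarshal: indexed access iterates the list
        // without allocating an Iterator per collection.
        for (int i = 0; i < entries.size(); i++)
            entries.get(i).unmarshal();

        System.out.println("all unmarshalled: " + entries.stream().allMatch(e -> e.unmarshalled));
    }
}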

Example 7 with CacheGroupContext

Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.

From the class GridDhtPartitionsExchangeFuture, method processFullMessage:

/**
 * @param checkCrd If {@code true} checks that local node is exchange coordinator.
 * @param node Sender node.
 * @param msg Message.
 */
private void processFullMessage(boolean checkCrd, ClusterNode node, GridDhtPartitionsFullMessage msg) {
    try {
        assert exchId.equals(msg.exchangeId()) : msg;
        assert msg.lastVersion() != null : msg;
        if (checkCrd) {
            assert node != null;
            synchronized (mux) {
                if (crd == null) {
                    if (log.isInfoEnabled())
                        log.info("Ignore full message, all server nodes left: " + msg);
                    return;
                }
                // Act according to this node's current role in the exchange.
                switch (state) {
                    case CRD:
                    case BECOME_CRD:
                        {
                            if (log.isInfoEnabled())
                                log.info("Ignore full message, node is coordinator: " + msg);
                            return;
                        }
                    case DONE:
                        {
                            if (log.isInfoEnabled())
                                log.info("Ignore full message, future is done: " + msg);
                            return;
                        }
                    case SRV:
                    case CLIENT:
                        {
                            if (!crd.equals(node)) {
                                if (log.isInfoEnabled()) {
                                    log.info("Received full message from non-coordinator [node=" + node.id() + ", nodeOrder=" + node.order() + ", crd=" + crd.id() + ", crdOrder=" + crd.order() + ']');
                                }
                                if (node.order() > crd.order())
                                    fullMsgs.put(node, msg);
                                return;
                            } else {
                                AffinityTopologyVersion resVer = msg.resultTopologyVersion() != null ? msg.resultTopologyVersion() : initialVersion();
                                if (log.isInfoEnabled()) {
                                    log.info("Received full message, will finish exchange [node=" + node.id() + ", resVer=" + resVer + ']');
                                }
                                finishState = new FinishState(crd.id(), resVer, msg);
                                state = ExchangeLocalState.DONE;
                                break;
                            }
                        }
                }
            }
        } else
            assert node == null : node;
        AffinityTopologyVersion resTopVer = initialVersion();
        // With exchange merging enabled, the full message may carry a later
        // result topology version than the one this future started with.
        if (exchCtx.mergeExchanges()) {
            if (msg.resultTopologyVersion() != null && !initialVersion().equals(msg.resultTopologyVersion())) {
                if (log.isInfoEnabled()) {
                    log.info("Received full message, need merge [curFut=" + initialVersion() + ", resVer=" + msg.resultTopologyVersion() + ']');
                }
                resTopVer = msg.resultTopologyVersion();
                if (cctx.exchange().mergeExchanges(this, msg)) {
                    assert cctx.kernalContext().isStopping();
                    // Node is stopping, no need to further process exchange.
                    return;
                }
                if (hasMergedExchanges())
                    updateTopologies(false);
                assert resTopVer.equals(exchCtx.events().topologyVersion()) : "Unexpected result version [" + "msgVer=" + resTopVer + ", locVer=" + exchCtx.events().topologyVersion() + ']';
            }
            exchCtx.events().processEvents(this);
            if (localJoinExchange())
                cctx.affinity().onLocalJoin(this, msg, resTopVer);
            else {
                if (exchCtx.events().hasServerLeft())
                    cctx.affinity().applyAffinityFromFullMessage(this, msg);
                else
                    cctx.affinity().onServerJoinWithExchangeMergeProtocol(this, false);
                for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
                    if (grp.isLocal() || cacheGroupStopping(grp.groupId()))
                        continue;
                    grp.topology().beforeExchange(this, true, false);
                }
            }
        } else if (localJoinExchange() && !exchCtx.fetchAffinityOnJoin())
            cctx.affinity().onLocalJoin(this, msg, resTopVer);
        else if (forceAffReassignment)
            cctx.affinity().applyAffinityFromFullMessage(this, msg);
        updatePartitionFullMap(resTopVer, msg);
        IgniteCheckedException err = null;
        if (stateChangeExchange() && !F.isEmpty(msg.getErrorsMap())) {
            err = new IgniteCheckedException("Cluster state change failed");
            cctx.kernalContext().state().onStateChangeError(msg.getErrorsMap(), exchActions.stateChangeRequest());
        }
        onDone(resTopVer, err);
    } catch (IgniteCheckedException e) {
        onDone(e);
    }
}
Also used: IgniteCheckedException (org.apache.ignite.IgniteCheckedException), AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion), CacheGroupContext (org.apache.ignite.internal.processors.cache.CacheGroupContext)
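
The heart of processFullMessage is the role check under mux: a single enum-driven switch decides whether the node ignores the message, defers it, or finishes the exchange. Here is a minimal sketch of that guard pattern, with ExchangeLocalState reduced to a hypothetical stand-in enum and node identities reduced to strings; the real method also defers messages from a newer coordinator, which is omitted here.

public class StateGuardDemo {
    // Hypothetical stand-in for ExchangeLocalState.
    enum State { CRD, BECOME_CRD, SRV, CLIENT, DONE }

    private final Object mux = new Object();

    private State state = State.SRV;

    // Mirrors the guard in processFullMessage: only a server or client node
    // that received the message from the current coordinator acts on it.
    boolean tryAccept(String senderId, String crdId) {
        synchronized (mux) {
            switch (state) {
                case CRD:
                case BECOME_CRD:
                case DONE:
                    // Coordinator or already-finished future ignores the message.
                    return false;

                case SRV:
                case CLIENT:
                    if (!senderId.equals(crdId))
                        return false; // Not from the current coordinator.

                    state = State.DONE;

                    return true;
            }

            return false;
        }
    }

    public static void main(String[] args) {
        StateGuardDemo demo = new StateGuardDemo();

        System.out.println(demo.tryAccept("nodeB", "nodeA")); // false: not from crd
        System.out.println(demo.tryAccept("nodeA", "nodeA")); // true: accepted
        System.out.println(demo.tryAccept("nodeA", "nodeA")); // false: already DONE
    }
}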

Example 8 with CacheGroupContext

Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.

From the class GridDhtPartitionsExchangeFuture, method detectLostPartitions:

/**
 * Detect lost partitions.
 *
 * @param resTopVer Result topology version.
 */
private void detectLostPartitions(AffinityTopologyVersion resTopVer) {
    boolean detected = false;
    // Check for interruption under the exchange interrupt lock and bail out
    // early (e.g. when the node is stopping).
    synchronized (cctx.exchange().interruptLock()) {
        if (Thread.currentThread().isInterrupted())
            return;
        for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
            if (!grp.isLocal()) {
                boolean detectedOnGrp = grp.topology().detectLostPartitions(resTopVer, events().lastEvent());
                detected |= detectedOnGrp;
            }
        }
    }
    if (detected)
        cctx.exchange().scheduleResendPartitions();
}
Also used: CacheGroupContext (org.apache.ignite.internal.processors.cache.CacheGroupContext)
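
detectLostPartitions accumulates per-group results with |= so that every group is visited even after the first detection, while the resend is scheduled at most once. Below is a compact sketch of that accumulate-then-act shape; GroupTopology is a hypothetical stand-in for grp.topology(), not an Ignite interface.

import java.util.List;

public class LostPartitionScanDemo {
    // Hypothetical stand-in for grp.topology(): reports whether this cache
    // group detected lost partitions for the new topology version.
    interface GroupTopology {
        boolean detectLost();
    }

    // Same accumulate-then-act shape as detectLostPartitions: visit every
    // group, OR the per-group results, and run the follow-up at most once.
    static boolean scan(List<GroupTopology> groups, Runnable onDetected) {
        boolean detected = false;

        for (GroupTopology top : groups)
            detected |= top.detectLost();

        if (detected)
            onDetected.run(); // Plays the role of scheduleResendPartitions().

        return detected;
    }

    public static void main(String[] args) {
        List<GroupTopology> groups = List.of(() -> false, () -> true, () -> false);

        boolean res = scan(groups, () -> System.out.println("resend scheduled"));

        System.out.println("detected=" + res);
    }
}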

Example 9 with CacheGroupContext

Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.

From the class GridDhtPartitionsExchangeFuture, method updateTopologies:

/**
 * Updates topology versions and discovery caches on all topologies.
 *
 * @param crd Coordinator flag.
 * @throws IgniteCheckedException If failed.
 */
private void updateTopologies(boolean crd) throws IgniteCheckedException {
    for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
        if (grp.isLocal())
            continue;
        // Drain any client-only topology kept for this group and carry its
        // update sequence over; -1 means there is nothing to take over.
        GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(grp.groupId());
        long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();
        GridDhtPartitionTopology top = grp.topology();
        if (crd) {
            boolean updateTop = exchId.topologyVersion().equals(grp.localStartVersion());
            if (updateTop && clientTop != null) {
                top.update(null, clientTop.partitionMap(true), clientTop.fullUpdateCounters(), Collections.emptySet(), null);
            }
        }
        top.updateTopologyVersion(this, events().discoveryCache(), updSeq, cacheGroupStopping(grp.groupId()));
    }
    for (GridClientPartitionTopology top : cctx.exchange().clientTopologies())
        top.updateTopologyVersion(this, events().discoveryCache(), -1, cacheGroupStopping(top.groupId()));
}
Also used: GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology), CacheGroupContext (org.apache.ignite.internal.processors.cache.CacheGroupContext), GridClientPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.GridClientPartitionTopology)
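
The client-topology handoff is the subtle part of updateTopologies: clearClientTopology removes the temporary client-side topology for the group, and its update sequence is carried over, falling back to -1 if none existed. Here is a tiny sketch of that take-over-with-fallback pattern, using a plain map of hypothetical group ids to sequences in place of the real topology objects.

import java.util.HashMap;
import java.util.Map;

public class ClientTopologyHandoffDemo {
    // Hypothetical per-group client topologies, keyed by group id; the value
    // stands in for GridClientPartitionTopology.lastUpdateSequence().
    static final Map<Integer, Long> CLIENT_SEQ = new HashMap<>(Map.of(1, 42L));

    // Mirrors the handoff in updateTopologies: remove the client-side entry,
    // if any, and carry its update sequence over, falling back to -1.
    static long takeOverUpdateSequence(int grpId) {
        Long clientSeq = CLIENT_SEQ.remove(grpId);

        return clientSeq == null ? -1 : clientSeq;
    }

    public static void main(String[] args) {
        System.out.println(takeOverUpdateSequence(1)); // 42: carried over
        System.out.println(takeOverUpdateSequence(1)); // -1: already cleared
        System.out.println(takeOverUpdateSequence(2)); // -1: never existed
    }
}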

Example 10 with CacheGroupContext

Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.

From the class GridDhtPartitionsExchangeFuture, method onAllReceived:

/**
 * @param sndResNodes Additional nodes to send finish message to.
 */
private void onAllReceived(@Nullable Collection<ClusterNode> sndResNodes) {
    try {
        assert crd.isLocal();
        assert partHistSuppliers.isEmpty() : partHistSuppliers;
        // No merging, and the coordinator is not the oldest alive server node
        // (it changed during this exchange), so run beforeExchange here.
        if (!exchCtx.mergeExchanges() && !crd.equals(events().discoveryCache().serverNodes().get(0))) {
            for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
                if (!grp.isLocal())
                    grp.topology().beforeExchange(this, !centralizedAff && !forceAffReassignment, false);
            }
        }
        if (exchCtx.mergeExchanges()) {
            if (log.isInfoEnabled())
                log.info("Coordinator received all messages, try merge [ver=" + initialVersion() + ']');
            boolean finish = cctx.exchange().mergeExchangesOnCoordinator(this);
            // Synchronize in case of changed coordinator (thread switched to sys-*)
            synchronized (mux) {
                if (hasMergedExchanges())
                    updateTopologies(true);
            }
            if (!finish)
                return;
        }
        finishExchangeOnCoordinator(sndResNodes);
    } catch (IgniteCheckedException e) {
        if (reconnectOnError(e))
            onDone(new IgniteNeedReconnectException(cctx.localNode(), e));
        else
            onDone(e);
    }
}
Also used: IgniteCheckedException (org.apache.ignite.IgniteCheckedException), CacheGroupContext (org.apache.ignite.internal.processors.cache.CacheGroupContext), IgniteNeedReconnectException (org.apache.ignite.internal.IgniteNeedReconnectException)
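
The catch block shows the completion contract of the exchange future: a recoverable error converts into IgniteNeedReconnectException, everything else fails the future directly. Below is a minimal sketch of that classify-then-complete shape; reconnectOnError here is a hypothetical predicate for illustration, not Ignite's actual check.

public class ReconnectOnErrorDemo {
    // Hypothetical predicate: the real reconnectOnError inspects the actual
    // failure; here we simply treat IllegalStateException as recoverable.
    static boolean reconnectOnError(Exception e) {
        return e instanceof IllegalStateException;
    }

    // Same classify-then-complete shape as the catch block in onAllReceived:
    // a recoverable error completes the future with a reconnect request,
    // anything else completes it exceptionally.
    static String completeWith(Exception e) {
        return reconnectOnError(e) ? "NEED_RECONNECT" : "FAILED: " + e.getMessage();
    }

    public static void main(String[] args) {
        System.out.println(completeWith(new IllegalStateException("client disconnected")));
        System.out.println(completeWith(new Exception("unexpected topology state")));
    }
}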

Aggregations

CacheGroupContext (org.apache.ignite.internal.processors.cache.CacheGroupContext): 21
HashMap (java.util.HashMap): 7
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 7
Map (java.util.Map): 6
ConcurrentMap (java.util.concurrent.ConcurrentMap): 6
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 6
LinkedHashMap (java.util.LinkedHashMap): 5
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 5
GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology): 5
ArrayList (java.util.ArrayList): 4
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 4
List (java.util.List): 3
UUID (java.util.UUID): 3
ClusterNode (org.apache.ignite.cluster.ClusterNode): 3
GridCacheContext (org.apache.ignite.internal.processors.cache.GridCacheContext): 3
GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition): 3
HashSet (java.util.HashSet): 2
Set (java.util.Set): 2
IgniteNeedReconnectException (org.apache.ignite.internal.IgniteNeedReconnectException): 2
DiscoveryCustomEvent (org.apache.ignite.internal.events.DiscoveryCustomEvent): 2