Example 56 with ClusterNode

Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.

The class GridDhtAtomicCache, method start().

/**
 * {@inheritDoc}
 */
@SuppressWarnings({ "IfMayBeConditional", "SimplifiableIfStatement" })
@Override
public void start() throws IgniteCheckedException {
    super.start();
    CacheMetricsImpl m = new CacheMetricsImpl(ctx);
    if (ctx.dht().near() != null)
        m.delegate(ctx.dht().near().metrics0());
    metrics = m;
    ctx.io().addCacheHandler(ctx.cacheId(), GridNearGetRequest.class, new CI2<UUID, GridNearGetRequest>() {

        @Override
        public void apply(UUID nodeId, GridNearGetRequest req) {
            processNearGetRequest(nodeId, req);
        }
    });
    ctx.io().addCacheHandler(ctx.cacheId(), GridNearSingleGetRequest.class, new CI2<UUID, GridNearSingleGetRequest>() {

        @Override
        public void apply(UUID nodeId, GridNearSingleGetRequest req) {
            processNearSingleGetRequest(nodeId, req);
        }
    });
    ctx.io().addCacheHandler(ctx.cacheId(), GridNearAtomicAbstractUpdateRequest.class, new CI2<UUID, GridNearAtomicAbstractUpdateRequest>() {

        @Override
        public void apply(UUID nodeId, GridNearAtomicAbstractUpdateRequest req) {
            processNearAtomicUpdateRequest(nodeId, req);
        }

        @Override
        public String toString() {
            return "GridNearAtomicAbstractUpdateRequest handler " + "[msgIdx=" + GridNearAtomicAbstractUpdateRequest.CACHE_MSG_IDX + ']';
        }
    });
    ctx.io().addCacheHandler(ctx.cacheId(), GridNearAtomicUpdateResponse.class, new CI2<UUID, GridNearAtomicUpdateResponse>() {

        @Override
        public void apply(UUID nodeId, GridNearAtomicUpdateResponse res) {
            processNearAtomicUpdateResponse(nodeId, res);
        }

        @Override
        public String toString() {
            return "GridNearAtomicUpdateResponse handler " + "[msgIdx=" + GridNearAtomicUpdateResponse.CACHE_MSG_IDX + ']';
        }
    });
    ctx.io().addCacheHandler(ctx.cacheId(), GridDhtAtomicAbstractUpdateRequest.class, new CI2<UUID, GridDhtAtomicAbstractUpdateRequest>() {

        @Override
        public void apply(UUID nodeId, GridDhtAtomicAbstractUpdateRequest req) {
            processDhtAtomicUpdateRequest(nodeId, req);
        }

        @Override
        public String toString() {
            return "GridDhtAtomicUpdateRequest handler " + "[msgIdx=" + GridDhtAtomicUpdateRequest.CACHE_MSG_IDX + ']';
        }
    });
    ctx.io().addCacheHandler(ctx.cacheId(), GridDhtAtomicUpdateResponse.class, new CI2<UUID, GridDhtAtomicUpdateResponse>() {

        @Override
        public void apply(UUID nodeId, GridDhtAtomicUpdateResponse res) {
            processDhtAtomicUpdateResponse(nodeId, res);
        }

        @Override
        public String toString() {
            return "GridDhtAtomicUpdateResponse handler " + "[msgIdx=" + GridDhtAtomicUpdateResponse.CACHE_MSG_IDX + ']';
        }
    });
    ctx.io().addCacheHandler(ctx.cacheId(), GridDhtAtomicDeferredUpdateResponse.class, new CI2<UUID, GridDhtAtomicDeferredUpdateResponse>() {

        @Override
        public void apply(UUID nodeId, GridDhtAtomicDeferredUpdateResponse res) {
            processDhtAtomicDeferredUpdateResponse(nodeId, res);
        }

        @Override
        public String toString() {
            return "GridDhtAtomicDeferredUpdateResponse handler " + "[msgIdx=" + GridDhtAtomicDeferredUpdateResponse.CACHE_MSG_IDX + ']';
        }
    });
    ctx.io().addCacheHandler(ctx.cacheId(), GridDhtAtomicNearResponse.class, new CI2<UUID, GridDhtAtomicNearResponse>() {

        @Override
        public void apply(UUID uuid, GridDhtAtomicNearResponse msg) {
            processDhtAtomicNearResponse(uuid, msg);
        }

        @Override
        public String toString() {
            return "GridDhtAtomicNearResponse handler " + "[msgIdx=" + GridDhtAtomicNearResponse.CACHE_MSG_IDX + ']';
        }
    });
    ctx.io().addCacheHandler(ctx.cacheId(), GridNearAtomicCheckUpdateRequest.class, new CI2<UUID, GridNearAtomicCheckUpdateRequest>() {

        @Override
        public void apply(UUID uuid, GridNearAtomicCheckUpdateRequest msg) {
            processCheckUpdateRequest(uuid, msg);
        }

        @Override
        public String toString() {
            return "GridNearAtomicCheckUpdateRequest handler " + "[msgIdx=" + GridNearAtomicCheckUpdateRequest.CACHE_MSG_IDX + ']';
        }
    });
    ctx.io().addCacheHandler(ctx.cacheId(), GridDhtForceKeysRequest.class, new MessageHandler<GridDhtForceKeysRequest>() {

        @Override
        public void onMessage(ClusterNode node, GridDhtForceKeysRequest msg) {
            processForceKeysRequest(node, msg);
        }
    });
    ctx.io().addCacheHandler(ctx.cacheId(), GridDhtForceKeysResponse.class, new MessageHandler<GridDhtForceKeysResponse>() {

        @Override
        public void onMessage(ClusterNode node, GridDhtForceKeysResponse msg) {
            processForceKeyResponse(node, msg);
        }
    });
    if (near == null) {
        ctx.io().addCacheHandler(ctx.cacheId(), GridNearGetResponse.class, new CI2<UUID, GridNearGetResponse>() {

            @Override
            public void apply(UUID nodeId, GridNearGetResponse res) {
                processNearGetResponse(nodeId, res);
            }
        });
        ctx.io().addCacheHandler(ctx.cacheId(), GridNearSingleGetResponse.class, new CI2<UUID, GridNearSingleGetResponse>() {

            @Override
            public void apply(UUID nodeId, GridNearSingleGetResponse res) {
                processNearSingleGetResponse(nodeId, res);
            }
        });
    }
}
Also used: GridDhtForceKeysResponse (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtForceKeysResponse), CacheMetricsImpl (org.apache.ignite.internal.processors.cache.CacheMetricsImpl), GridDhtForceKeysRequest (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtForceKeysRequest), GridNearGetRequest (org.apache.ignite.internal.processors.cache.distributed.near.GridNearGetRequest), UUID (java.util.UUID), GridNearGetResponse (org.apache.ignite.internal.processors.cache.distributed.near.GridNearGetResponse), ClusterNode (org.apache.ignite.cluster.ClusterNode), GridNearSingleGetResponse (org.apache.ignite.internal.processors.cache.distributed.near.GridNearSingleGetResponse), GridNearSingleGetRequest (org.apache.ignite.internal.processors.cache.distributed.near.GridNearSingleGetRequest)
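
A note on the pattern: the CI2 registrations above all share the same shape, a message class plus a two-argument callback. Below is a self-contained toy sketch of that shape (not Ignite code: BiConsumer stands in for CI2, and the Message/GetRequest types are invented). Whether a lambda could replace the anonymous classes in the real code depends on CI2 being a functional interface in the Ignite version at hand; note also that the anonymous classes above override toString() for diagnostics, which a lambda cannot do.

import java.util.UUID;
import java.util.function.BiConsumer;

public class HandlerRegistryDemo {

    interface Message { }

    static class GetRequest implements Message { }

    // Generic helper that collapses the repeated anonymous-class boilerplate.
    static <M extends Message> void register(Class<M> type, BiConsumer<UUID, M> handler) {
        System.out.println("registered handler for " + type.getSimpleName());

        // Simulate one delivery so the demo exercises the callback.
        handler.accept(UUID.randomUUID(), null);
    }

    public static void main(String[] args) {
        register(GetRequest.class, (nodeId, req) ->
            System.out.println("processing GetRequest from node " + nodeId));
    }
}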

Example 57 with ClusterNode

Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.

The class GridAffinityAssignmentCache, method currentBaselineAssignment().

/**
 * @param topVer Topology version.
 * @return Baseline assignment with filtered out offline nodes.
 */
private List<List<ClusterNode>> currentBaselineAssignment(AffinityTopologyVersion topVer) {
    Map<Object, ClusterNode> alives = new HashMap<>();
    for (ClusterNode node : ctx.discovery().nodes(topVer)) {
        if (!node.isClient() && !node.isDaemon())
            alives.put(node.consistentId(), node);
    }
    List<List<ClusterNode>> result = new ArrayList<>(baselineAssignment.size());
    for (int p = 0; p < baselineAssignment.size(); p++) {
        List<ClusterNode> baselineMapping = baselineAssignment.get(p);
        List<ClusterNode> currentMapping = null;
        for (ClusterNode node : baselineMapping) {
            ClusterNode aliveNode = alives.get(node.consistentId());
            if (aliveNode != null) {
                if (currentMapping == null)
                    currentMapping = new ArrayList<>();
                currentMapping.add(aliveNode);
            }
        }
        result.add(p, currentMapping != null ? currentMapping : Collections.<ClusterNode>emptyList());
    }
    return result;
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), List (java.util.List)
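
A self-contained toy illustration of the filtering rule above (not Ignite code; node names and partitions are invented): per-partition baseline mappings keyed by consistent id are reduced to the currently alive server nodes, preserving baseline order, and a partition whose baseline owners are all offline maps to an empty list.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;

public class BaselineFilterDemo {

    public static void main(String[] args) {
        // Per-partition baseline mappings by consistent id (toy data).
        List<List<String>> baseline = List.of(
            List.of("A", "B"),   // partition 0
            List.of("B", "C"),   // partition 1
            List.of("A", "C"));  // partition 2

        // Only node C is currently online.
        Set<String> alive = Set.of("C");

        List<List<String>> result = new ArrayList<>();

        for (List<String> mapping : baseline) {
            List<String> cur = new ArrayList<>();

            for (String consistentId : mapping) {
                if (alive.contains(consistentId))
                    cur.add(consistentId);   // Keep alive owners, in baseline order.
            }

            result.add(cur.isEmpty() ? Collections.emptyList() : cur);
        }

        System.out.println(result);   // [[], [C], [C]]
    }
}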

Example 58 with ClusterNode

Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.

The class GridDhtPartitionDemander, method handleSupplyMessage().

/**
 * Handles supply message from {@code nodeId} with specified {@code topicId}.
 *
 * The supply message contains entries used to populate the rebalancing partitions.
 *
 * This is part of a cyclic process:
 * rebalancing partitions are populated with entries from the Supply message, and
 * if not all partitions specified in {@link #rebalanceFut} have been rebalanced or
 * marked as missed, a new Demand message is sent to request the next batch of entries.
 *
 * @param topicId Topic id.
 * @param nodeId Node id.
 * @param supply Supply message.
 */
public void handleSupplyMessage(int topicId, final UUID nodeId, final GridDhtPartitionSupplyMessage supply) {
    AffinityTopologyVersion topVer = supply.topologyVersion();
    final RebalanceFuture fut = rebalanceFut;
    ClusterNode node = ctx.node(nodeId);
    if (node == null)
        return;
    // Topology already changed (for the future this supply message is based on).
    if (topologyChanged(fut))
        return;
    if (!fut.isActual(supply.rebalanceId())) {
        // Supply message is based on another future.
        return;
    }
    if (log.isDebugEnabled())
        log.debug("Received supply message [grp=" + grp.cacheOrGroupName() + ", msg=" + supply + ']');
    // Check whether there were class loading errors on unmarshal
    if (supply.classError() != null) {
        U.warn(log, "Rebalancing from node cancelled [grp=" + grp.cacheOrGroupName() + ", node=" + nodeId + "]. Class got undeployed during preloading: " + supply.classError());
        fut.cancel(nodeId);
        return;
    }
    final GridDhtPartitionTopology top = grp.topology();
    if (grp.sharedGroup()) {
        for (GridCacheContext cctx : grp.caches()) {
            if (cctx.statisticsEnabled()) {
                long keysCnt = supply.keysForCache(cctx.cacheId());
                if (keysCnt != -1)
                    cctx.cache().metrics0().onRebalancingKeysCountEstimateReceived(keysCnt);
                // Cannot be calculated per cache.
                cctx.cache().metrics0().onRebalanceBatchReceived(supply.messageSize());
            }
        }
    } else {
        GridCacheContext cctx = grp.singleCacheContext();
        if (cctx.statisticsEnabled()) {
            if (supply.estimatedKeysCount() != -1)
                cctx.cache().metrics0().onRebalancingKeysCountEstimateReceived(supply.estimatedKeysCount());
            cctx.cache().metrics0().onRebalanceBatchReceived(supply.messageSize());
        }
    }
    try {
        AffinityAssignment aff = grp.affinity().cachedAffinity(topVer);
        GridCacheContext cctx = grp.sharedGroup() ? null : grp.singleCacheContext();
        // Preload.
        for (Map.Entry<Integer, CacheEntryInfoCollection> e : supply.infos().entrySet()) {
            int p = e.getKey();
            if (aff.get(p).contains(ctx.localNode())) {
                GridDhtLocalPartition part = top.localPartition(p, topVer, true);
                assert part != null;
                boolean last = supply.last().containsKey(p);
                if (part.state() == MOVING) {
                    boolean reserved = part.reserve();
                    assert reserved : "Failed to reserve partition [igniteInstanceName=" + ctx.igniteInstanceName() + ", grp=" + grp.cacheOrGroupName() + ", part=" + part + ']';
                    part.lock();
                    try {
                        // Loop through all received entries and try to preload them.
                        for (GridCacheEntryInfo entry : e.getValue().infos()) {
                            if (!preloadEntry(node, p, entry, topVer)) {
                                if (log.isDebugEnabled())
                                    log.debug("Got entries for invalid partition during " + "preloading (will skip) [p=" + p + ", entry=" + entry + ']');
                                break;
                            }
                            if (grp.sharedGroup() && (cctx == null || cctx.cacheId() != entry.cacheId()))
                                cctx = ctx.cacheContext(entry.cacheId());
                            if (cctx != null && cctx.statisticsEnabled())
                                cctx.cache().metrics0().onRebalanceKeyReceived();
                        }
                        // If the supply message was the last one for this partition,
                        // then we take ownership.
                        if (last) {
                            top.own(part);
                            fut.partitionDone(nodeId, p);
                            if (log.isDebugEnabled())
                                log.debug("Finished rebalancing partition: " + part);
                        }
                    } finally {
                        part.unlock();
                        part.release();
                    }
                } else {
                    if (last)
                        fut.partitionDone(nodeId, p);
                    if (log.isDebugEnabled())
                        log.debug("Skipping rebalancing partition (state is not MOVING): " + part);
                }
            } else {
                fut.partitionDone(nodeId, p);
                if (log.isDebugEnabled())
                    log.debug("Skipping rebalancing partition (it does not belong on current node): " + p);
            }
        }
        // Only request partitions based on latest topology version.
        for (Integer miss : supply.missed()) {
            if (aff.get(miss).contains(ctx.localNode()))
                fut.partitionMissed(nodeId, miss);
        }
        for (Integer miss : supply.missed()) fut.partitionDone(nodeId, miss);
        GridDhtPartitionDemandMessage d = new GridDhtPartitionDemandMessage(supply.rebalanceId(), supply.topologyVersion(), grp.groupId());
        d.timeout(grp.config().getRebalanceTimeout());
        d.topic(rebalanceTopics.get(topicId));
        if (!topologyChanged(fut) && !fut.isDone()) {
            // Send demand message.
            try {
                ctx.io().sendOrderedMessage(node, rebalanceTopics.get(topicId), d.convertIfNeeded(node.version()), grp.ioPolicy(), grp.config().getRebalanceTimeout());
            } catch (ClusterTopologyCheckedException e) {
                if (log.isDebugEnabled()) {
                    log.debug("Node left during rebalancing [grp=" + grp.cacheOrGroupName() + ", node=" + node.id() + ", msg=" + e.getMessage() + ']');
                }
            }
        }
    } catch (IgniteSpiException | IgniteCheckedException e) {
        LT.error(log, e, "Error during rebalancing [grp=" + grp.cacheOrGroupName() + ", srcNode=" + node.id() + ", err=" + e + ']');
    }
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), GridCacheEntryInfo (org.apache.ignite.internal.processors.cache.GridCacheEntryInfo), AffinityAssignment (org.apache.ignite.internal.processors.affinity.AffinityAssignment), GridCacheContext (org.apache.ignite.internal.processors.cache.GridCacheContext), AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion), GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), CacheEntryInfoCollection (org.apache.ignite.internal.processors.cache.CacheEntryInfoCollection), GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition), IgniteSpiException (org.apache.ignite.spi.IgniteSpiException), Map (java.util.Map), HashMap (java.util.HashMap), ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException)
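
A self-contained toy model of the cyclic process described in the javadoc above (not Ignite code; batch contents and partition ids are invented): the demander keeps "sending Demand" and "receiving Supply" until every partition it expects is either populated or marked missed, and missed partitions are left to be re-requested on a later topology version.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class SupplyDemandCycleDemo {

    public static void main(String[] args) {
        // Batches the supplier will deliver: each "Supply" carries the ids of
        // the partitions it finished (the last() map in the real message).
        Deque<List<Integer>> supplies = new ArrayDeque<>(List.of(List.of(0, 1), List.of(2)));

        // Partitions this demander still has to rebalance; 3 is missed on the supplier.
        Set<Integer> remaining = new HashSet<>(Set.of(0, 1, 2, 3));
        Set<Integer> missed = Set.of(3);

        // Missed partitions stop the wait too; they are re-requested later.
        remaining.removeAll(missed);

        while (!remaining.isEmpty()) {                // "Send Demand" for the next batch...
            List<Integer> supply = supplies.poll();   // ...and "receive Supply".

            if (supply == null)
                break;                                // Supplier exhausted (toy safeguard).

            remaining.removeAll(supply);              // Populate and own finished partitions.
        }

        System.out.println("Round done; to re-request: missed=" + missed);
    }
}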

Example 59 with ClusterNode

Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.

The class GridCommonAbstractTest, method movingKeysAfterJoin().

/**
 * Returns the list of keys that are primary for the given node on the current topology,
 * but whose primary node will change after a new node joins.
 *
 * @param ign Ignite.
 * @param cacheName Cache name.
 * @param size Number of keys.
 * @return List of keys.
 */
protected final List<Integer> movingKeysAfterJoin(Ignite ign, String cacheName, int size) {
    assertEquals("Expected consistentId is set to node name", ign.name(), ign.cluster().localNode().consistentId());
    GridCacheContext<Object, Object> cctx = ((IgniteKernal) ign).context().cache().internalCache(cacheName).context();
    ArrayList<ClusterNode> nodes = new ArrayList<>(ign.cluster().nodes());
    AffinityFunction func = cctx.config().getAffinity();
    AffinityFunctionContext ctx = new GridAffinityFunctionContextImpl(nodes, null, null, AffinityTopologyVersion.NONE, cctx.config().getBackups());
    List<List<ClusterNode>> calcAff = func.assignPartitions(ctx);
    GridTestNode fakeNode = new GridTestNode(UUID.randomUUID(), null);
    fakeNode.consistentId(getTestIgniteInstanceName(nodes.size()));
    nodes.add(fakeNode);
    ctx = new GridAffinityFunctionContextImpl(nodes, null, null, AffinityTopologyVersion.NONE, cctx.config().getBackups());
    List<List<ClusterNode>> calcAff2 = func.assignPartitions(ctx);
    Set<Integer> movedParts = new HashSet<>();
    UUID locId = ign.cluster().localNode().id();
    for (int i = 0; i < calcAff.size(); i++) {
        if (calcAff.get(i).get(0).id().equals(locId) && !calcAff2.get(i).get(0).id().equals(locId))
            movedParts.add(i);
    }
    List<Integer> keys = new ArrayList<>();
    Affinity<Integer> aff = ign.affinity(cacheName);
    for (int i = 0; i < 10_000; i++) {
        int keyPart = aff.partition(i);
        if (movedParts.contains(keyPart)) {
            keys.add(i);
            if (keys.size() == size)
                break;
        }
    }
    assertEquals("Failed to find moving keys [movedPats=" + movedParts + ", keys=" + keys + ']', size, keys.size());
    return keys;
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), ArrayList (java.util.ArrayList), AffinityFunctionContext (org.apache.ignite.cache.affinity.AffinityFunctionContext), GridTestNode (org.apache.ignite.testframework.GridTestNode), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), GridAffinityFunctionContextImpl (org.apache.ignite.internal.processors.affinity.GridAffinityFunctionContextImpl), List (java.util.List), UUID (java.util.UUID), AffinityFunction (org.apache.ignite.cache.affinity.AffinityFunction), HashSet (java.util.HashSet)
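
A hedged usage sketch for the helper above, as it might appear inside a test method of a GridCommonAbstractTest subclass (the cache name "myCache", the key count, and the grid indices are assumptions, not from the source):

// With one node started, pick keys whose primary will migrate once a
// second node joins, then verify the migration after rebalancing.
List<Integer> keys = movingKeysAfterJoin(grid(0), "myCache", 10);

startGrid(1);                 // New node joins; primaries for 'keys' move away.
awaitPartitionMapExchange();  // See Example 60 below.

Affinity<Integer> aff = grid(0).affinity("myCache");

for (Integer key : keys)
    assertFalse(aff.isPrimary(grid(0).cluster().localNode(), key));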

Example 60 with ClusterNode

Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.

The class GridCommonAbstractTest, method awaitPartitionMapExchange().

/**
 * @param waitEvicts If {@code true}, waits until evictions are finished.
 * @param waitNode2PartUpdate If {@code true}, waits until the nodes' node2part info update is finished.
 * @param nodes Optional nodes. If {@code null}, the method waits for all nodes; for a non-null
 *      collection, only the given nodes are waited for.
 * @param printPartState If {@code true}, prints the partition state if evictions did not happen.
 * @throws InterruptedException If interrupted.
 */
@SuppressWarnings("BusyWait")
protected void awaitPartitionMapExchange(boolean waitEvicts, boolean waitNode2PartUpdate, @Nullable Collection<ClusterNode> nodes, boolean printPartState) throws InterruptedException {
    long timeout = getPartitionMapExchangeTimeout();
    long startTime = -1;
    Set<String> names = new HashSet<>();
    Ignite crd = null;
    for (Ignite g : G.allGrids()) {
        ClusterNode node = g.cluster().localNode();
        if (crd == null || node.order() < crd.cluster().localNode().order()) {
            crd = g;
            if (node.order() == 1)
                break;
        }
    }
    if (crd == null)
        return;
    AffinityTopologyVersion waitTopVer = ((IgniteKernal) crd).context().discovery().topologyVersionEx();
    if (waitTopVer.topologyVersion() <= 0)
        waitTopVer = new AffinityTopologyVersion(1, 0);
    for (Ignite g : G.allGrids()) {
        if (nodes != null && !nodes.contains(g.cluster().localNode()))
            continue;
        IgniteKernal g0 = (IgniteKernal) g;
        names.add(g0.configuration().getIgniteInstanceName());
        if (startTime != -1) {
            if (startTime != g0.context().discovery().gridStartTime())
                fail("Found nodes from different clusters, probable some test does not stop nodes " + "[allNodes=" + names + ']');
        } else
            startTime = g0.context().discovery().gridStartTime();
        if (g.cluster().localNode().isDaemon())
            continue;
        IgniteInternalFuture<?> exchFut = g0.context().cache().context().exchange().affinityReadyFuture(waitTopVer);
        if (exchFut != null && !exchFut.isDone()) {
            try {
                exchFut.get(timeout);
            } catch (IgniteCheckedException e) {
                log.error("Failed to wait for exchange [topVer=" + waitTopVer + ", node=" + g0.name() + ']', e);
            }
        }
        for (IgniteCacheProxy<?, ?> c : g0.context().cache().jcaches()) {
            CacheConfiguration cfg = c.context().config();
            if (cfg == null)
                continue;
            if (cfg.getCacheMode() != LOCAL && cfg.getRebalanceMode() != NONE && g.cluster().nodes().size() > 1) {
                AffinityFunction aff = cfg.getAffinity();
                GridDhtCacheAdapter<?, ?> dht = dht(c);
                GridDhtPartitionTopology top = dht.topology();
                for (int p = 0; p < aff.partitions(); p++) {
                    long start = 0;
                    for (int i = 0; ; i++) {
                        boolean match = false;
                        GridCachePartitionExchangeManager<?, ?> exchMgr = dht.context().shared().exchange();
                        AffinityTopologyVersion readyVer = exchMgr.readyAffinityVersion();
                        // Wait for the affinity ready future to complete first.
                        // Otherwise, there may be an assertion when printing top.readyTopologyVersion().
                        try {
                            IgniteInternalFuture<?> fut = exchMgr.affinityReadyFuture(readyVer);
                            if (fut != null)
                                fut.get();
                        } catch (IgniteCheckedException e) {
                            throw new IgniteException(e);
                        }
                        if (readyVer.topologyVersion() > 0 && c.context().started()) {
                            // Must map on updated version of topology.
                            Collection<ClusterNode> affNodes = dht.context().affinity().assignment(readyVer).idealAssignment().get(p);
                            int affNodesCnt = affNodes.size();
                            GridDhtTopologyFuture topFut = top.topologyVersionFuture();
                            Collection<ClusterNode> owners = (topFut != null && topFut.isDone()) ? top.owners(p, AffinityTopologyVersion.NONE) : Collections.<ClusterNode>emptyList();
                            int ownerNodesCnt = owners.size();
                            GridDhtLocalPartition loc = top.localPartition(p, readyVer, false);
                            if (affNodesCnt != ownerNodesCnt || !affNodes.containsAll(owners) || (waitEvicts && loc != null && loc.state() != GridDhtPartitionState.OWNING)) {
                                LT.warn(log(), "Waiting for topology map update [" + "igniteInstanceName=" + g.name() + ", cache=" + cfg.getName() + ", cacheId=" + dht.context().cacheId() + ", topVer=" + top.readyTopologyVersion() + ", p=" + p + ", affNodesCnt=" + affNodesCnt + ", ownersCnt=" + ownerNodesCnt + ", affNodes=" + F.nodeIds(affNodes) + ", owners=" + F.nodeIds(owners) + ", topFut=" + topFut + ", locNode=" + g.cluster().localNode() + ']');
                            } else
                                match = true;
                        } else {
                            LT.warn(log(), "Waiting for topology map update [" + "igniteInstanceName=" + g.name() + ", cache=" + cfg.getName() + ", cacheId=" + dht.context().cacheId() + ", topVer=" + top.readyTopologyVersion() + ", started=" + dht.context().started() + ", p=" + p + ", readVer=" + readyVer + ", locNode=" + g.cluster().localNode() + ']');
                        }
                        if (!match) {
                            if (i == 0)
                                start = System.currentTimeMillis();
                            if (System.currentTimeMillis() - start > timeout) {
                                U.dumpThreads(log);
                                if (printPartState)
                                    printPartitionState(c);
                                throw new IgniteException("Timeout of waiting for topology map update [" + "igniteInstanceName=" + g.name() + ", cache=" + cfg.getName() + ", cacheId=" + dht.context().cacheId() + ", topVer=" + top.readyTopologyVersion() + ", p=" + p + ", readVer=" + readyVer + ", locNode=" + g.cluster().localNode() + ']');
                            }
                            // Busy wait.
                            Thread.sleep(20);
                            continue;
                        }
                        if (i > 0)
                            log().warning("Finished waiting for topology map update [igniteInstanceName=" + g.name() + ", p=" + p + ", duration=" + (System.currentTimeMillis() - start) + "ms]");
                        break;
                    }
                }
                if (waitNode2PartUpdate) {
                    long start = System.currentTimeMillis();
                    boolean failed = true;
                    while (failed) {
                        failed = false;
                        for (GridDhtPartitionMap pMap : top.partitionMap(true).values()) {
                            if (failed)
                                break;
                            for (Map.Entry entry : pMap.entrySet()) {
                                if (System.currentTimeMillis() - start > timeout) {
                                    U.dumpThreads(log);
                                    throw new IgniteException("Timeout of waiting for partition state update [" + "igniteInstanceName=" + g.name() + ", cache=" + cfg.getName() + ", cacheId=" + dht.context().cacheId() + ", topVer=" + top.readyTopologyVersion() + ", locNode=" + g.cluster().localNode() + ']');
                                }
                                if (entry.getValue() != GridDhtPartitionState.OWNING) {
                                    LT.warn(log(), "Waiting for correct partition state part=" + entry.getKey() + ", should be OWNING [state=" + entry.getValue() + "], node=" + g.name() + ", cache=" + c.getName());
                                    // Busy wait.
                                    Thread.sleep(200);
                                    failed = true;
                                    break;
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    log.info("awaitPartitionMapExchange finished");
}
Also used: GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology), GridDhtTopologyFuture (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), IgniteException (org.apache.ignite.IgniteException), Ignite (org.apache.ignite.Ignite), GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition), CacheConfiguration (org.apache.ignite.configuration.CacheConfiguration), HashSet (java.util.HashSet), ClusterNode (org.apache.ignite.cluster.ClusterNode), IgniteKernal (org.apache.ignite.internal.IgniteKernal), AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion), GridDhtPartitionMap (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap), AffinityFunction (org.apache.ignite.cache.affinity.AffinityFunction), Map (java.util.Map)
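
A hedged usage sketch (the flag values and the stopped grid index are illustrative): after a topology change in a test, this call blocks until the partition map settles on every node, additionally waiting for evictions and node2part updates, and printing the partition state if the wait times out.

stopGrid(1);                                        // Topology change.
awaitPartitionMapExchange(true, true, null, true);  // null = wait for all nodes.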

Aggregations

ClusterNode (org.apache.ignite.cluster.ClusterNode): 1104
UUID (java.util.UUID): 281
ArrayList (java.util.ArrayList): 280
Test (org.junit.Test): 276
Ignite (org.apache.ignite.Ignite): 239
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 239
HashMap (java.util.HashMap): 184
Map (java.util.Map): 182
List (java.util.List): 165
IgniteException (org.apache.ignite.IgniteException): 147
GridCommonAbstractTest (org.apache.ignite.testframework.junits.common.GridCommonAbstractTest): 147
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 143
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 126
Collection (java.util.Collection): 113
Message (org.apache.ignite.plugin.extensions.communication.Message): 106
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 87
HashSet (java.util.HashSet): 85
ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException): 82
CacheConfiguration (org.apache.ignite.configuration.CacheConfiguration): 81
IgniteEx (org.apache.ignite.internal.IgniteEx): 81