Example 11 with GridDhtLocalPartition

Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition in project ignite by apache.

The class GridCommonAbstractTest, method awaitPartitionMapExchange.

/**
 * @param waitEvicts If {@code true}, wait for evictions to finish.
 * @param waitNode2PartUpdate If {@code true}, wait for node2part info updates to finish on all nodes.
 * @param nodes Optional nodes.
 * @param printPartState If {@code true}, print partition state if evictions did not happen.
 * @throws InterruptedException If interrupted.
 */
@SuppressWarnings("BusyWait")
protected void awaitPartitionMapExchange(boolean waitEvicts, boolean waitNode2PartUpdate, @Nullable Collection<ClusterNode> nodes, boolean printPartState) throws InterruptedException {
    long timeout = 30_000;
    long startTime = -1;
    Set<String> names = new HashSet<>();
    Ignite crd = null;
    for (Ignite g : G.allGrids()) {
        ClusterNode node = g.cluster().localNode();
        if (crd == null || node.order() < crd.cluster().localNode().order()) {
            crd = g;
            if (node.order() == 1)
                break;
        }
    }
    if (crd == null)
        return;
    AffinityTopologyVersion waitTopVer = ((IgniteKernal) crd).context().discovery().topologyVersionEx();
    if (waitTopVer.topologyVersion() <= 0)
        waitTopVer = new AffinityTopologyVersion(1, 0);
    for (Ignite g : G.allGrids()) {
        if (nodes != null && !nodes.contains(g.cluster().localNode()))
            continue;
        IgniteKernal g0 = (IgniteKernal) g;
        names.add(g0.configuration().getIgniteInstanceName());
        if (startTime != -1) {
            if (startTime != g0.context().discovery().gridStartTime())
                fail("Found nodes from different clusters, probable some test does not stop nodes " + "[allNodes=" + names + ']');
        } else
            startTime = g0.context().discovery().gridStartTime();
        IgniteInternalFuture<?> exchFut = g0.context().cache().context().exchange().affinityReadyFuture(waitTopVer);
        if (exchFut != null && !exchFut.isDone()) {
            try {
                exchFut.get(timeout);
            } catch (IgniteCheckedException e) {
                log.error("Failed to wait for exchange [topVer=" + waitTopVer + ", node=" + g0.name() + ']', e);
            }
        }
        for (IgniteCacheProxy<?, ?> c : g0.context().cache().jcaches()) {
            CacheConfiguration cfg = c.context().config();
            if (cfg == null)
                continue;
            if (cfg.getCacheMode() != LOCAL && cfg.getRebalanceMode() != NONE && g.cluster().nodes().size() > 1) {
                AffinityFunction aff = cfg.getAffinity();
                GridDhtCacheAdapter<?, ?> dht = dht(c);
                GridDhtPartitionTopology top = dht.topology();
                for (int p = 0; p < aff.partitions(); p++) {
                    long start = 0;
                    for (int i = 0; ; i++) {
                        boolean match = false;
                        AffinityTopologyVersion readyVer = dht.context().shared().exchange().readyAffinityVersion();
                        if (readyVer.topologyVersion() > 0 && c.context().started()) {
                            // Must map on updated version of topology.
                            Collection<ClusterNode> affNodes = dht.context().affinity().assignment(readyVer).idealAssignment().get(p);
                            int affNodesCnt = affNodes.size();
                            GridDhtTopologyFuture topFut = top.topologyVersionFuture();
                            Collection<ClusterNode> owners = (topFut != null && topFut.isDone()) ? top.nodes(p, AffinityTopologyVersion.NONE) : Collections.<ClusterNode>emptyList();
                            int ownerNodesCnt = owners.size();
                            GridDhtLocalPartition loc = top.localPartition(p, readyVer, false);
                            if (affNodesCnt != ownerNodesCnt || !affNodes.containsAll(owners) || (waitEvicts && loc != null && loc.state() != GridDhtPartitionState.OWNING)) {
                                LT.warn(log(), "Waiting for topology map update [" + "igniteInstanceName=" + g.name() + ", cache=" + cfg.getName() + ", cacheId=" + dht.context().cacheId() + ", topVer=" + top.topologyVersion() + ", p=" + p + ", affNodesCnt=" + affNodesCnt + ", ownersCnt=" + ownerNodesCnt + ", affNodes=" + F.nodeIds(affNodes) + ", owners=" + F.nodeIds(owners) + ", topFut=" + topFut + ", locNode=" + g.cluster().localNode() + ']');
                            } else
                                match = true;
                        } else {
                            LT.warn(log(), "Waiting for topology map update [" + "igniteInstanceName=" + g.name() + ", cache=" + cfg.getName() + ", cacheId=" + dht.context().cacheId() + ", topVer=" + top.topologyVersion() + ", started=" + dht.context().started() + ", p=" + p + ", readVer=" + readyVer + ", locNode=" + g.cluster().localNode() + ']');
                        }
                        if (!match) {
                            if (i == 0)
                                start = System.currentTimeMillis();
                            if (System.currentTimeMillis() - start > timeout) {
                                U.dumpThreads(log);
                                if (printPartState)
                                    printPartitionState(c);
                                throw new IgniteException("Timeout of waiting for topology map update [" + "igniteInstanceName=" + g.name() + ", cache=" + cfg.getName() + ", cacheId=" + dht.context().cacheId() + ", topVer=" + top.topologyVersion() + ", p=" + p + ", readVer=" + readyVer + ", locNode=" + g.cluster().localNode() + ']');
                            }
                            // Busy wait.
                            Thread.sleep(20);
                            continue;
                        }
                        if (i > 0)
                            log().warning("Finished waiting for topology map update [igniteInstanceName=" + g.name() + ", p=" + p + ", duration=" + (System.currentTimeMillis() - start) + "ms]");
                        break;
                    }
                }
                if (waitNode2PartUpdate) {
                    long start = System.currentTimeMillis();
                    boolean failed = true;
                    while (failed) {
                        failed = false;
                        for (GridDhtPartitionMap pMap : top.partitionMap(true).values()) {
                            if (failed)
                                break;
                            for (Map.Entry entry : pMap.entrySet()) {
                                if (System.currentTimeMillis() - start > timeout) {
                                    U.dumpThreads(log);
                                    throw new IgniteException("Timeout of waiting for partition state update [" + "igniteInstanceName=" + g.name() + ", cache=" + cfg.getName() + ", cacheId=" + dht.context().cacheId() + ", topVer=" + top.topologyVersion() + ", locNode=" + g.cluster().localNode() + ']');
                                }
                                if (entry.getValue() != GridDhtPartitionState.OWNING) {
                                    LT.warn(log(), "Waiting for correct partition state part=" + entry.getKey() + ", should be OWNING [state=" + entry.getValue() + "], node=" + g.name() + ", cache=" + c.getName());
                                    // Busy wait.
                                    Thread.sleep(200);
                                    failed = true;
                                    break;
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    log.info("awaitPartitionMapExchange finished");
}
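
For orientation: everything between the coordinator lookup and the final log line is a busy-wait until the affinity assignment and the owner set agree for every partition on every node. A minimal caller sketch, assuming a test class derived from GridCommonAbstractTest (the test name and grid indices are illustrative, not from the source above):

/** Hypothetical caller sketch: await PME after a node joins. */
public void testAwaitAfterNodeJoin() throws Exception {
    startGrid(0);
    // Starting a second node changes the topology and triggers a partition map exchange.
    startGrid(1);
    // Wait for evictions and node2part updates on all nodes; print partition state on timeout.
    awaitPartitionMapExchange(true, true, null, true);
    stopAllGrids();
}
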
Also used : ClusterNode (org.apache.ignite.cluster.ClusterNode), IgniteKernal (org.apache.ignite.internal.IgniteKernal), AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion), GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology), GridDhtPartitionMap (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap), GridDhtTopologyFuture (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), IgniteException (org.apache.ignite.IgniteException), Ignite (org.apache.ignite.Ignite), GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition), AffinityFunction (org.apache.ignite.cache.affinity.AffinityFunction), Map (java.util.Map), CacheConfiguration (org.apache.ignite.configuration.CacheConfiguration), HashSet (java.util.HashSet)

Example 12 with GridDhtLocalPartition

Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition in project ignite by apache.

The class CacheDhtLocalPartitionAfterRemoveSelfTest, method testMemoryUsage.

/**
 * @throws Exception If failed.
 */
public void testMemoryUsage() throws Exception {
    assertEquals(10_000, GridDhtLocalPartition.MAX_DELETE_QUEUE_SIZE);
    IgniteCache<TestKey, Integer> cache = grid(0).cache(DEFAULT_CACHE_NAME);
    for (int i = 0; i < 20_000; ++i) cache.put(new TestKey(String.valueOf(i)), i);
    for (int i = 0; i < 20_000; ++i) assertEquals((Object) i, cache.getAndRemove(new TestKey(String.valueOf(i))));
    assertEquals(0, cache.size());
    for (int g = 0; g < GRID_CNT; g++) {
        cache = grid(g).cache(DEFAULT_CACHE_NAME);
        for (GridDhtLocalPartition p : dht(cache).topology().localPartitions()) {
            int size = p.dataStore().size();
            assertTrue("Unexpected size: " + size, size <= 32);
        }
    }
}
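
The test reaches into internals via dht(cache).topology().localPartitions() to count the rows left in each partition's data store after 20,000 removes. For the key-to-partition side of the same picture, the public Affinity API suffices; a minimal sketch, assuming a running node referenced by ignite and the same TestKey type (imports org.apache.ignite.cache.affinity.Affinity):

// Sketch using only the public Affinity API.
Affinity<TestKey> aff = ignite.affinity(DEFAULT_CACHE_NAME);
int part = aff.partition(new TestKey("42"));        // Partition the key hashes to.
ClusterNode primary = aff.mapPartitionToNode(part); // Current primary owner of that partition.
System.out.println("key 42 -> partition " + part + " owned by " + primary.id());
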
Also used : GridDhtLocalPartition(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition)

Example 13 with GridDhtLocalPartition

Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition in project ignite by apache.

The class NoneRebalanceModeSelfTest, method testRemoveAll.

/**
 * @throws Exception If failed.
 */
public void testRemoveAll() throws Exception {
    GridNearTransactionalCache cache = (GridNearTransactionalCache) ((IgniteKernal) grid(0)).internalCache(DEFAULT_CACHE_NAME);
    for (GridDhtLocalPartition part : cache.dht().topology().localPartitions()) assertEquals(OWNING, part.state());
    grid(0).cache(DEFAULT_CACHE_NAME).removeAll();
}
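
With rebalance mode NONE there is no preloading, so every local partition is expected to be OWNING immediately, which is exactly what the loop asserts before removeAll() runs. A sketch of the cache configuration such a test presumes (type parameters illustrative):

CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>(DEFAULT_CACHE_NAME);
ccfg.setCacheMode(CacheMode.PARTITIONED);
// NONE disables rebalancing entirely: partitions become OWNING right away and never pass through MOVING.
ccfg.setRebalanceMode(CacheRebalanceMode.NONE);
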
Also used : GridDhtLocalPartition(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition)

Example 14 with GridDhtLocalPartition

Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition in project ignite by apache.

The class GridDhtPartitionsExchangeFuture, method assignPartitionStates.

/**
 * @param top Topology to assign.
 */
private void assignPartitionStates(GridDhtPartitionTopology top) {
    Map<Integer, CounterWithNodes> maxCntrs = new HashMap<>();
    for (Map.Entry<UUID, GridDhtPartitionsAbstractMessage> e : msgs.entrySet()) {
        assert e.getValue().partitionUpdateCounters(top.cacheId()) != null;
        for (Map.Entry<Integer, T2<Long, Long>> e0 : e.getValue().partitionUpdateCounters(top.cacheId()).entrySet()) {
            int p = e0.getKey();
            UUID uuid = e.getKey();
            GridDhtPartitionState state = top.partitionState(uuid, p);
            if (state != GridDhtPartitionState.OWNING)
                continue;
            Long cntr = e0.getValue().get1();
            if (cntr == null)
                continue;
            CounterWithNodes maxCntr = maxCntrs.get(p);
            if (maxCntr == null || cntr > maxCntr.cnt)
                maxCntrs.put(p, new CounterWithNodes(cntr, uuid));
            else if (cntr == maxCntr.cnt)
                maxCntr.nodes.add(uuid);
        }
    }
    // Also must process counters from the local node.
    for (GridDhtLocalPartition part : top.currentLocalPartitions()) {
        GridDhtPartitionState state = top.partitionState(cctx.localNodeId(), part.id());
        if (state != GridDhtPartitionState.OWNING)
            continue;
        CounterWithNodes maxCntr = maxCntrs.get(part.id());
        if (maxCntr == null || part.initialUpdateCounter() > maxCntr.cnt)
            maxCntrs.put(part.id(), new CounterWithNodes(part.updateCounter(), cctx.localNodeId()));
        else if (part.initialUpdateCounter() == maxCntr.cnt)
            maxCntr.nodes.add(cctx.localNodeId());
    }
    int entryLeft = maxCntrs.size();
    for (Map.Entry<Integer, CounterWithNodes> e : maxCntrs.entrySet()) {
        int p = e.getKey();
        long maxCntr = e.getValue().cnt;
        entryLeft--;
        if (entryLeft != 0 && maxCntr == 0)
            continue;
        top.setOwners(p, e.getValue().nodes, entryLeft == 0);
    }
}
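
Stripped of Ignite types, the selection above is a per-partition arg-max with ties retained: every node reporting the maximal update counter for a partition ends up in its owner set. A self-contained restatement of that rule in plain JDK collections (class and method names hypothetical):

import java.util.*;

class OwnerArgMax {
    final Map<Integer, Long> maxCntr = new HashMap<>();
    final Map<Integer, Set<UUID>> owners = new HashMap<>();

    /** Feed one (partition, node, counter) observation into the arg-max. */
    void accept(int part, UUID node, long cntr) {
        Long max = maxCntr.get(part);
        if (max == null || cntr > max) {
            maxCntr.put(part, cntr); // New maximum: restart the owner set.
            owners.put(part, new HashSet<>(Collections.singleton(node)));
        }
        else if (cntr == max)
            owners.get(part).add(node); // Tie with the maximum: node joins the owners.
    }
}
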
Also used : HashMap (java.util.HashMap), GridDhtPartitionState (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState), GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition), UUID (java.util.UUID), Map (java.util.Map), ConcurrentMap (java.util.concurrent.ConcurrentMap), T2 (org.apache.ignite.internal.util.typedef.T2)

Example 15 with GridDhtLocalPartition

Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition in project ignite by apache.

The class GridDhtForceKeysFuture, method map.

/**
 * @param key Key.
 * @param mappings Mappings.
 * @param exc Exclude nodes.
 * @return Mappings.
 */
private Map<ClusterNode, Set<KeyCacheObject>> map(KeyCacheObject key, @Nullable Map<ClusterNode, Set<KeyCacheObject>> mappings, Collection<ClusterNode> exc) {
    ClusterNode loc = cctx.localNode();
    GridCacheEntryEx e = cctx.dht().peekEx(key);
    try {
        if (e != null && !e.isNewLocked()) {
            if (log.isDebugEnabled()) {
                int part = cctx.affinity().partition(key);
                log.debug("Will not rebalance key (entry is not new) [cacheName=" + cctx.name() + ", key=" + key + ", part=" + part + ", locId=" + cctx.nodeId() + ']');
            }
            // Key has been rebalanced or retrieved already.
            return mappings;
        }
    } catch (GridCacheEntryRemovedException ignore) {
        if (log.isDebugEnabled())
            log.debug("Received removed DHT entry for force keys request [entry=" + e + ", locId=" + cctx.nodeId() + ']');
    }
    int part = cctx.affinity().partition(key);
    List<ClusterNode> owners = F.isEmpty(exc) ? top.owners(part, topVer) : new ArrayList<>(F.view(top.owners(part, topVer), F.notIn(exc)));
    if (owners.isEmpty() || (owners.contains(loc) && cctx.rebalanceEnabled())) {
        if (log.isDebugEnabled())
            log.debug("Will not rebalance key (local node is owner) [key=" + key + ", part=" + part + "topVer=" + topVer + ", locId=" + cctx.nodeId() + ']');
        // Key is already rebalanced.
        return mappings;
    }
    // Create partition.
    GridDhtLocalPartition locPart = top.localPartition(part, topVer, false);
    if (log.isDebugEnabled())
        log.debug("Mapping local partition [loc=" + cctx.localNodeId() + ", topVer" + topVer + ", part=" + locPart + ", owners=" + owners + ", allOwners=" + U.toShortString(top.owners(part)) + ']');
    if (locPart == null)
        invalidParts.add(part);
    else if (!cctx.rebalanceEnabled() || locPart.state() == MOVING) {
        Collections.sort(owners, CU.nodeComparator(false));
        // Load from youngest owner.
        ClusterNode pick = F.first(owners);
        assert pick != null;
        if (!cctx.rebalanceEnabled() && loc.id().equals(pick.id()))
            pick = F.first(F.view(owners, F.remoteNodes(loc.id())));
        if (pick == null) {
            if (log.isDebugEnabled())
                log.debug("Will not rebalance key (no nodes to request from with rebalancing disabled) [key=" + key + ", part=" + part + ", locId=" + cctx.nodeId() + ']');
            return mappings;
        }
        if (mappings == null)
            mappings = U.newHashMap(keys.size());
        Collection<KeyCacheObject> mappedKeys = F.addIfAbsent(mappings, pick, F.<KeyCacheObject>newSet());
        assert mappedKeys != null;
        mappedKeys.add(key);
        if (log.isDebugEnabled())
            log.debug("Will rebalance key from node [cacheName=" + cctx.name() + ", key=" + key + ", part=" + part + ", node=" + pick.id() + ", locId=" + cctx.nodeId() + ']');
    } else if (locPart.state() != OWNING)
        invalidParts.add(part);
    else {
        if (log.isDebugEnabled())
            log.debug("Will not rebalance key (local partition is not MOVING) [cacheName=" + cctx.name() + ", key=" + key + ", part=" + locPart + ", locId=" + cctx.nodeId() + ']');
    }
    return mappings;
}
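
Two idioms in the body are easy to miss. F.addIfAbsent(mappings, pick, F.<KeyCacheObject>newSet()) fetches the key set mapped to pick, creating it on first use; in plain JDK terms the same step would be:

// JDK equivalent of the F.addIfAbsent(...) call above.
mappings.computeIfAbsent(pick, n -> new HashSet<>()).add(key);

And Collections.sort(owners, CU.nodeComparator(false)) orders owners by node order so that F.first(owners) picks the most recently joined one, matching the "Load from youngest owner" comment.
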
Also used : ClusterNode (org.apache.ignite.cluster.ClusterNode), GridCacheEntryEx (org.apache.ignite.internal.processors.cache.GridCacheEntryEx), Collection (java.util.Collection), GridCacheEntryRemovedException (org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException), GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition), KeyCacheObject (org.apache.ignite.internal.processors.cache.KeyCacheObject)

Aggregations

GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition): 25 usages
ClusterNode (org.apache.ignite.cluster.ClusterNode): 8 usages
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 8 usages
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 6 usages
GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology): 6 usages
CacheDataRow (org.apache.ignite.internal.processors.cache.database.CacheDataRow): 5 usages
Map (java.util.Map): 4 usages
GridCacheEntryInfo (org.apache.ignite.internal.processors.cache.GridCacheEntryInfo): 4 usages
Collection (java.util.Collection): 3 usages
HashMap (java.util.HashMap): 3 usages
UUID (java.util.UUID): 3 usages
Ignite (org.apache.ignite.Ignite): 3 usages
CacheConfiguration (org.apache.ignite.configuration.CacheConfiguration): 3 usages
IgniteKernal (org.apache.ignite.internal.IgniteKernal): 3 usages
GridCacheEntryEx (org.apache.ignite.internal.processors.cache.GridCacheEntryEx): 3 usages
GridCacheEntryRemovedException (org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException): 3 usages
KeyCacheObject (org.apache.ignite.internal.processors.cache.KeyCacheObject): 3 usages
GridDhtInvalidPartitionException (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException): 3 usages
T2 (org.apache.ignite.internal.util.typedef.T2): 3 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2 usages