
Example 61 with GridDhtPartitionTopology

Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.

The class GridTestUtils, method waitTopologyUpdate.

/**
 * @param cacheName Cache name.
 * @param backups Number of backups.
 * @param log Logger.
 * @throws Exception If failed.
 */
@SuppressWarnings("BusyWait")
public static <K, V> void waitTopologyUpdate(@Nullable String cacheName, int backups, IgniteLogger log) throws Exception {
    for (Ignite g : Ignition.allGrids()) {
        IgniteCache<K, V> cache = ((IgniteEx) g).cache(cacheName);
        GridDhtPartitionTopology top = dht(cache).topology();
        while (true) {
            boolean wait = false;
            for (int p = 0; p < g.affinity(cacheName).partitions(); p++) {
                Collection<ClusterNode> nodes = top.nodes(p, AffinityTopologyVersion.NONE);
                if (nodes.size() > backups + 1) {
                    LT.warn(log, "Partition map was not updated yet (will wait) [igniteInstanceName=" + g.name() + ", p=" + p + ", nodes=" + F.nodeIds(nodes) + ']');
                    wait = true;
                    break;
                }
            }
            if (wait)
                Thread.sleep(20);
            else
                // Partition map is up to date for this node; exit the wait loop.
                break;
        }
    }
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) GridDhtPartitionTopology(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology) IgniteEx(org.apache.ignite.internal.IgniteEx) Ignite(org.apache.ignite.Ignite)
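
A minimal usage sketch, assuming waitTopologyUpdate is reachable as the static helper shown above; the GridTestUtils import path, instance names, cache name and backup count below are illustrative assumptions, not part of the example.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.testframework.GridTestUtils;

/** Hypothetical sketch of calling waitTopologyUpdate() after starting a small cluster. */
public class WaitTopologyUpdateSketch {
    public static void main(String[] args) throws Exception {
        // Start a small cluster; the instance names are arbitrary for this sketch.
        Ignite first = Ignition.start(new IgniteConfiguration().setIgniteInstanceName("node-0"));
        Ignition.start(new IgniteConfiguration().setIgniteInstanceName("node-1"));
        Ignition.start(new IgniteConfiguration().setIgniteInstanceName("node-2"));

        // One backup, so each partition should eventually settle on two owners.
        IgniteCache<Integer, String> cache = first.getOrCreateCache(
            new CacheConfiguration<Integer, String>("myCache").setBackups(1));

        cache.put(1, "value");

        // Block until no partition reports more owners than primaries + backups (here: 2).
        GridTestUtils.waitTopologyUpdate("myCache", 1, first.log());

        Ignition.stopAll(true);
    }
}

With one backup configured, the helper returns once no node's partition map reports more than two owners for any partition, which is exactly the nodes.size() > backups + 1 condition in the loop above.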

Example 62 with GridDhtPartitionTopology

Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.

The class GatherPartitionStatistics, method recollectPartition.

/**
 * Collect some statistics, fix the existing statistics in the repository and return the resulting partition statistics.
 *
 * @param cctx Cache context to get partition from.
 * @param partStat Existing partition statistics to fix or use as a base.
 * @param colsToCollect Columns to collect.
 * @param colsToRemove Columns to remove.
 * @return New partition statistics.
 */
private ObjectPartitionStatisticsImpl recollectPartition(GridCacheContext<?, ?> cctx, ObjectPartitionStatisticsImpl partStat, Map<String, StatisticsColumnConfiguration> colsToCollect, Set<String> colsToRemove) {
    CacheGroupContext grp = cctx.group();
    GridDhtPartitionTopology top = grp.topology();
    AffinityTopologyVersion topVer = top.readyTopologyVersion();
    GridDhtLocalPartition locPart = top.localPartition(partId, topVer, false);
    if (locPart == null)
        throw new GatherStatisticCancelException();
    boolean reserved = locPart.reserve();
    GridH2Table tbl = gathCtx.table();
    ObjectPartitionStatisticsImpl res = null;
    try {
        if (!reserved || (locPart.state() != OWNING)) {
            if (log.isDebugEnabled()) {
                log.debug("Partition not owning. Need to retry [part=" + partId + ", tbl=" + tbl.identifier() + ']');
            }
            throw new GatherStatisticCancelException();
        }
        Column[] cols = IgniteStatisticsHelper.filterColumns(tbl.getColumns(), colsToCollect.keySet());
        ColumnStatisticsCollector[] collectors = new ColumnStatisticsCollector[cols.length];
        for (int i = 0; i < cols.length; ++i) {
            long colCfgVer = colsToCollect.get(cols[i].getName()).version();
            collectors[i] = new ColumnStatisticsCollector(cols[i], tbl::compareTypeSafe, colCfgVer);
        }
        GridH2RowDescriptor rowDesc = tbl.rowDescriptor();
        GridQueryTypeDescriptor typeDesc = rowDesc.type();
        try {
            int checkInt = CANCELLED_CHECK_INTERVAL;
            if (log.isDebugEnabled()) {
                log.debug("Start partition scan [part=" + partId + ", tbl=" + gathCtx.table().identifier() + ']');
            }
            for (CacheDataRow row : grp.offheap().cachePartitionIterator(gathCtx.table().cacheId(), partId, null, false)) {
                if (--checkInt == 0) {
                    if (gathCtx.future().isCancelled())
                        throw new GatherStatisticCancelException();
                    checkInt = CANCELLED_CHECK_INTERVAL;
                }
                if (!typeDesc.matchType(row.value()) || wasExpired(row))
                    continue;
                H2Row h2row = rowDesc.createRow(row);
                for (ColumnStatisticsCollector colStat : collectors) colStat.add(h2row.getValue(colStat.col().getColumnId()));
            }
        } catch (IgniteCheckedException e) {
            log.warning(String.format("Unable to collect partition level statistics by %s.%s:%d due to %s", tbl.identifier().schema(), tbl.identifier().table(), partId, e.getMessage()));
            throw new IgniteException("Unable to collect partition level statistics", e);
        }
        Map<String, ColumnStatistics> colStats = Arrays.stream(collectors).collect(Collectors.toMap(csc -> csc.col().getName(), ColumnStatisticsCollector::finish));
        // Carry over existing column statistics that are not being removed, so the new object fully replaces the old one.
        if (partStat != null) {
            for (Map.Entry<String, ColumnStatistics> oldColStat : partStat.columnsStatistics().entrySet()) {
                if (!colsToRemove.contains(oldColStat.getKey()))
                    colStats.putIfAbsent(oldColStat.getKey(), oldColStat.getValue());
            }
        }
        res = new ObjectPartitionStatisticsImpl(partId, getRowCount(colStats), locPart.updateCounter(), colStats);
    } finally {
        if (reserved)
            locPart.release();
    }
    statRepo.replaceLocalPartitionStatistics(gathCtx.configuration().key(), res);
    if (gathCtx.configuration().columns().size() == colsToCollect.size())
        statRepo.refreshObsolescence(gathCtx.configuration().key(), partId);
    return res;
}
Also used : Arrays(java.util.Arrays) IgniteStatisticsRepository(org.apache.ignite.internal.processors.query.stat.IgniteStatisticsRepository) IgniteStatisticsHelper(org.apache.ignite.internal.processors.query.stat.IgniteStatisticsHelper) U(org.apache.ignite.internal.util.typedef.internal.U) HashMap(java.util.HashMap) Callable(java.util.concurrent.Callable) IgniteLogger(org.apache.ignite.IgniteLogger) CacheGroupContext(org.apache.ignite.internal.processors.cache.CacheGroupContext) ColumnStatisticsCollector(org.apache.ignite.internal.processors.query.stat.ColumnStatisticsCollector) ColumnStatistics(org.apache.ignite.internal.processors.query.stat.ColumnStatistics) HashSet(java.util.HashSet) Column(org.h2.table.Column) LocalStatisticsGatheringContext(org.apache.ignite.internal.processors.query.stat.LocalStatisticsGatheringContext) GridH2Table(org.apache.ignite.internal.processors.query.h2.opt.GridH2Table) GatherStatisticCancelException(org.apache.ignite.internal.processors.query.stat.GatherStatisticCancelException) Map(java.util.Map) StatisticsColumnConfiguration(org.apache.ignite.internal.processors.query.stat.config.StatisticsColumnConfiguration) GridH2RowDescriptor(org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor) GridQueryTypeDescriptor(org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor) F(org.apache.ignite.internal.util.typedef.F) ObjectPartitionStatisticsImpl(org.apache.ignite.internal.processors.query.stat.ObjectPartitionStatisticsImpl) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IgniteException(org.apache.ignite.IgniteException) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) Set(java.util.Set) OWNING(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.OWNING) CacheDataRow(org.apache.ignite.internal.processors.cache.persistence.CacheDataRow) Collectors(java.util.stream.Collectors) GridDhtLocalPartition(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition) Nullable(org.jetbrains.annotations.Nullable) H2Row(org.apache.ignite.internal.processors.query.h2.opt.H2Row) GridDhtPartitionTopology(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology) GridCacheContext(org.apache.ignite.internal.processors.cache.GridCacheContext) Collections(java.util.Collections)
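
The heart of recollectPartition is the reserve/scan/release discipline around a local partition. The stripped-down sketch below isolates that pattern; the class name, method name and Consumer-based row callback are hypothetical, while every Ignite call mirrors one used in the example above.

import java.util.function.Consumer;

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.processors.cache.CacheGroupContext;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology;
import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;

import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.OWNING;

/** Sketch of the reserve-scan-release pattern used by recollectPartition (names are hypothetical). */
public class PartitionScanSketch {
    /**
     * Scans one local partition and hands every row to {@code rowConsumer}.
     *
     * @return {@code false} if the partition is not present or not owned locally.
     */
    public static boolean scanPartition(CacheGroupContext grp, int cacheId, int partId,
        Consumer<CacheDataRow> rowConsumer) throws IgniteCheckedException {
        GridDhtPartitionTopology top = grp.topology();
        AffinityTopologyVersion topVer = top.readyTopologyVersion();

        GridDhtLocalPartition locPart = top.localPartition(partId, topVer, false);

        if (locPart == null)
            return false;

        // Reserve the partition so it cannot be evicted or moved away while it is being iterated.
        boolean reserved = locPart.reserve();

        try {
            // Only scan partitions this node currently owns.
            if (!reserved || locPart.state() != OWNING)
                return false;

            for (CacheDataRow row : grp.offheap().cachePartitionIterator(cacheId, partId, null, false))
                rowConsumer.accept(row);

            return true;
        }
        finally {
            // Always release the reservation so rebalancing and eviction are not blocked.
            if (reserved)
                locPart.release();
        }
    }
}

The reservation is the key design point: without it the partition could be evicted or rebalanced away mid-scan, which is what the OWNING check and the finally block in the original method guard against.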

Example 63 with GridDhtPartitionTopology

Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.

The class GridCommonAbstractTest, method awaitPartitionMapExchange.

/**
 * @param waitEvicts If {@code true}, waits for evictions to finish.
 * @param waitNode2PartUpdate If {@code true}, waits for each node's node2part map to be updated.
 * @param nodes Optional nodes. If {@code null}, the method waits for all nodes; otherwise only the given nodes
 *      are considered.
 * @param printPartState If {@code true}, prints the partition state when the expected state is not reached in time.
 * @param cacheNames Specific caches to wait for; {@code null} means all caches.
 * @throws InterruptedException If interrupted.
 */
@SuppressWarnings("BusyWait")
protected void awaitPartitionMapExchange(boolean waitEvicts, boolean waitNode2PartUpdate, @Nullable Collection<ClusterNode> nodes, boolean printPartState, @Nullable Set<String> cacheNames) throws InterruptedException {
    long timeout = getPartitionMapExchangeTimeout();
    long startTime = -1;
    Set<String> names = new HashSet<>();
    Ignite crd = null;
    for (Ignite g : G.allGrids()) {
        ClusterNode node = g.cluster().localNode();
        if (crd == null || node.order() < crd.cluster().localNode().order()) {
            crd = g;
            if (node.order() == 1)
                break;
        }
    }
    if (crd == null)
        return;
    AffinityTopologyVersion waitTopVer = ((IgniteKernal) crd).context().discovery().topologyVersionEx();
    if (waitTopVer.topologyVersion() <= 0)
        waitTopVer = new AffinityTopologyVersion(1, 0);
    for (Ignite g : G.allGrids()) {
        if (nodes != null && !nodes.contains(g.cluster().localNode()))
            continue;
        IgniteKernal g0 = (IgniteKernal) g;
        names.add(g0.configuration().getIgniteInstanceName());
        if (startTime != -1) {
            if (startTime != g0.context().discovery().gridStartTime())
                fail("Found nodes from different clusters, probable some test does not stop nodes " + "[allNodes=" + names + ']');
        } else
            startTime = g0.context().discovery().gridStartTime();
        if (g.cluster().localNode().isDaemon())
            continue;
        IgniteInternalFuture<?> exchFut = g0.context().cache().context().exchange().affinityReadyFuture(waitTopVer);
        if (exchFut != null && !exchFut.isDone()) {
            try {
                exchFut.get(timeout);
            } catch (IgniteCheckedException e) {
                log.error("Failed to wait for exchange [topVer=" + waitTopVer + ", node=" + g0.name() + ']', e);
            }
        }
        for (IgniteCacheProxy<?, ?> c : g0.context().cache().jcaches()) {
            CacheConfiguration cfg = c.context().config();
            if (cfg == null || cacheNames != null && !cacheNames.contains(cfg.getName()))
                continue;
            if (cfg.getCacheMode() != LOCAL && cfg.getRebalanceMode() != NONE && g.cluster().nodes().size() > 1) {
                AffinityFunction aff = cfg.getAffinity();
                GridDhtCacheAdapter<?, ?> dht = dht(c);
                GridDhtPartitionTopology top = dht.topology();
                for (int p = 0; p < aff.partitions(); p++) {
                    long start = 0;
                    for (int i = 0; ; i++) {
                        boolean match = false;
                        GridCachePartitionExchangeManager<?, ?> exchMgr = dht.context().shared().exchange();
                        AffinityTopologyVersion readyVer = exchMgr.readyAffinityVersion();
                        // Wait for the ready affinity version future first; otherwise there may be an assertion when printing top.readyTopologyVersion().
                        try {
                            IgniteInternalFuture<?> fut = exchMgr.affinityReadyFuture(readyVer);
                            if (fut != null)
                                fut.get();
                        } catch (IgniteCheckedException e) {
                            throw new IgniteException(e);
                        }
                        if (readyVer.topologyVersion() > 0 && c.context().started()) {
                            // Must map on updated version of topology.
                            List<ClusterNode> affNodes = dht.context().affinity().assignment(readyVer).idealAssignment().get(p);
                            int affNodesCnt = affNodes.size();
                            GridDhtTopologyFuture topFut = top.topologyVersionFuture();
                            Collection<ClusterNode> owners = (topFut != null && topFut.isDone()) ? top.owners(p, AffinityTopologyVersion.NONE) : Collections.<ClusterNode>emptyList();
                            int ownerNodesCnt = owners.size();
                            GridDhtLocalPartition loc = top.localPartition(p, readyVer, false);
                            boolean notPrimary = !affNodes.isEmpty() && !affNodes.get(0).equals(dht.context().affinity().primaryByPartition(p, readyVer));
                            if (affNodesCnt != ownerNodesCnt || !affNodes.containsAll(owners) || (waitEvicts && loc != null && loc.state() != OWNING) || notPrimary) {
                                if (i % 50 == 0)
                                    LT.warn(log(), "Waiting for topology map update [" + "igniteInstanceName=" + g.name() + ", cache=" + cfg.getName() + ", cacheId=" + dht.context().cacheId() + ", topVer=" + top.readyTopologyVersion() + ", p=" + p + ", affNodesCnt=" + affNodesCnt + ", ownersCnt=" + ownerNodesCnt + ", affNodes=" + F.nodeIds(affNodes) + ", owners=" + F.nodeIds(owners) + ", topFut=" + topFut + ", locNode=" + g.cluster().localNode() + ']');
                            } else
                                match = true;
                        } else {
                            if (i % 50 == 0)
                                LT.warn(log(), "Waiting for topology map update [" + "igniteInstanceName=" + g.name() + ", cache=" + cfg.getName() + ", cacheId=" + dht.context().cacheId() + ", topVer=" + top.readyTopologyVersion() + ", started=" + dht.context().started() + ", p=" + p + ", readVer=" + readyVer + ", locNode=" + g.cluster().localNode() + ']');
                        }
                        if (!match) {
                            if (i == 0)
                                start = System.currentTimeMillis();
                            if (System.currentTimeMillis() - start > timeout) {
                                U.dumpThreads(log);
                                if (printPartState)
                                    printPartitionState(c);
                                throw new IgniteException("Timeout of waiting for topology map update [" + "igniteInstanceName=" + g.name() + ", cache=" + cfg.getName() + ", cacheId=" + dht.context().cacheId() + ", topVer=" + top.readyTopologyVersion() + ", p=" + p + ", readVer=" + readyVer + ", locNode=" + g.cluster().localNode() + ']');
                            }
                            // Busy wait.
                            Thread.sleep(20);
                            continue;
                        }
                        if (i > 0)
                            log().warning("Finished waiting for topology map update [igniteInstanceName=" + g.name() + ", p=" + p + ", duration=" + (System.currentTimeMillis() - start) + "ms]");
                        break;
                    }
                }
                if (waitNode2PartUpdate) {
                    long start = System.currentTimeMillis();
                    boolean failed = true;
                    while (failed) {
                        failed = false;
                        for (GridDhtPartitionMap pMap : top.partitionMap(true).values()) {
                            if (failed)
                                break;
                            for (Map.Entry entry : pMap.entrySet()) {
                                if (System.currentTimeMillis() - start > timeout) {
                                    U.dumpThreads(log);
                                    throw new IgniteException("Timeout of waiting for partition state update [" + "igniteInstanceName=" + g.name() + ", cache=" + cfg.getName() + ", cacheId=" + dht.context().cacheId() + ", topVer=" + top.readyTopologyVersion() + ", locNode=" + g.cluster().localNode() + ']');
                                }
                                if (entry.getValue() != OWNING) {
                                    LT.warn(log(), "Waiting for correct partition state part=" + entry.getKey() + ", should be OWNING [state=" + entry.getValue() + "], node=" + g.name() + ", cache=" + c.getName());
                                    // Busy wait.
                                    Thread.sleep(200);
                                    failed = true;
                                    break;
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    log.info("awaitPartitionMapExchange finished");
}
Also used : GridDhtPartitionTopology(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology) GridDhtTopologyFuture(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IgniteException(org.apache.ignite.IgniteException) Ignite(org.apache.ignite.Ignite) GridDhtLocalPartition(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition) CacheConfiguration(org.apache.ignite.configuration.CacheConfiguration) HashSet(java.util.HashSet) ClusterNode(org.apache.ignite.cluster.ClusterNode) IgniteKernal(org.apache.ignite.internal.IgniteKernal) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) GridDhtPartitionMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap) AffinityFunction(org.apache.ignite.cache.affinity.AffinityFunction) Map(java.util.Map) GridDhtPartitionFullMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap) TreeMap(java.util.TreeMap) IgniteDhtDemandedPartitionsMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.IgniteDhtDemandedPartitionsMap)
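
A typical call site, sketched below under the assumption of a JUnit test class extending GridCommonAbstractTest; the test and class names and the grid counts are illustrative only.

import org.apache.ignite.Ignite;
import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
import org.junit.Test;

/** Hypothetical test showing the usual pattern: change the topology, then wait for rebalancing to settle. */
public class PartitionMapExchangeExampleTest extends GridCommonAbstractTest {
    /** */
    @Test
    public void testNodeJoinTriggersExchange() throws Exception {
        // Start an initial cluster of two nodes.
        startGrids(2);

        // Joining a third node triggers a partition map exchange and rebalancing.
        Ignite joined = startGrid(2);

        // Full overload from the example above: wait for evictions and node2part updates
        // on all nodes and caches, printing partition state on timeout.
        awaitPartitionMapExchange(true, true, null, true, null);

        // Only after the exchange has settled is it safe to assert on partition ownership.
        assertEquals(3, joined.cluster().nodes().size());

        stopAllGrids();
    }
}

Passing null for both the node collection and the cache-name set waits on all nodes and all caches, as described in the javadoc above.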

Example 64 with GridDhtPartitionTopology

Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.

The class GridCommonAbstractTest, method printPartitionState.

/**
 * Prints the partition state for the given cache.
 *
 * @param cacheName Cache name.
 * @param firstParts Number of partitions to print (the first {@code firstParts} partitions; {@code 0} means all).
 * @param nodes Grid nodes.
 */
protected static void printPartitionState(String cacheName, int firstParts, List<? extends Ignite> nodes) {
    StringBuilder sb = new StringBuilder();
    sb.append("----preload sync futures----\n");
    for (Ignite ig : nodes) {
        IgniteKernal k = ((IgniteKernal) ig);
        GridCacheAdapter<Object, Object> adapter = k.internalCache(cacheName);
        if (adapter == null)
            continue;
        IgniteInternalFuture<?> syncFut = adapter.preloader().syncFuture();
        sb.append("nodeId=").append(k.context().localNodeId()).append(" consistentId=").append(k.localNode().consistentId()).append(" isDone=").append(syncFut.isDone()).append("\n");
    }
    sb.append("----rebalance futures----\n");
    for (Ignite ig : nodes) {
        IgniteKernal k = ((IgniteKernal) ig);
        GridCacheAdapter<Object, Object> adapter = k.internalCache(cacheName);
        if (adapter == null)
            continue;
        IgniteInternalFuture<?> f = adapter.preloader().rebalanceFuture();
        try {
            sb.append("nodeId=").append(k.context().localNodeId()).append(" isDone=").append(f.isDone()).append(" res=").append(f.isDone() ? f.get() : "N/A").append(" topVer=").append((U.hasField(f, "topVer") ? String.valueOf(U.<Object>field(f, "topVer")) : "N/A")).append("\n");
            Map<UUID, IgniteDhtDemandedPartitionsMap> remaining = U.field(f, "remaining");
            sb.append("remaining: ");
            sb.append(remaining.toString());
            sb.append("\n");
        } catch (Throwable e) {
            log.error(e.getMessage());
        }
    }
    sb.append("----partition state----\n");
    for (Ignite g : nodes) {
        IgniteKernal g0 = (IgniteKernal) g;
        sb.append("localNodeId=").append(g0.localNode().id()).append(" grid=").append(g0.name()).append("\n");
        IgniteCacheProxy<?, ?> cache = null;
        try {
            cache = g0.context().cache().jcache(cacheName);
        } catch (IllegalArgumentException e) {
            // Client topology.
            continue;
        }
        GridDhtCacheAdapter<?, ?> dht = dht(cache);
        GridDhtPartitionTopology top = dht.topology();
        int parts = firstParts == 0 ? cache.context().config().getAffinity().partitions() : firstParts;
        for (int p = 0; p < parts; p++) {
            AffinityTopologyVersion readyVer = dht.context().shared().exchange().readyAffinityVersion();
            Collection<UUID> affNodes = F.nodeIds(dht.context().affinity().assignment(readyVer).idealAssignment().get(p));
            GridDhtLocalPartition part = top.localPartition(p, AffinityTopologyVersion.NONE, false);
            sb.append("local part=");
            if (part != null) {
                sb.append(p).append(" counters=").append(part.dataStore().partUpdateCounter()).append(" fullSize=").append(part.fullSize()).append(" state=").append(part.state()).append(" reservations=").append(part.reservations());
            } else
                sb.append(p).append(" is null");
            sb.append(" isAffNode=").append(affNodes.contains(g0.localNode().id())).append("\n");
            for (UUID nodeId : F.nodeIds(g0.context().discovery().allNodes())) {
                if (!nodeId.equals(g0.localNode().id())) {
                    top.readLock();
                    // Peek to remote state directly to distinguish if a partition is EVICTED or yet not initialized.
                    GridDhtPartitionFullMap map = U.field(top, "node2part");
                    try {
                        final GridDhtPartitionMap nodeMap = map.get(nodeId);
                        if (nodeMap == null)
                            // Skip client node.
                            continue;
                        final GridDhtPartitionState rmtState = nodeMap.get(p);
                        if (rmtState != null) {
                            sb.append(" nodeId=").append(nodeId).append(" part=").append(p).append(" state=").append(rmtState).append(" isAffNode=").append(affNodes.contains(nodeId)).append("\n");
                        } else {
                            sb.append(" nodeId=").append(nodeId).append(" part=").append(p).append(" is null").append("\n");
                        }
                    } finally {
                        top.readUnlock();
                    }
                }
            }
        }
        sb.append("\n");
    }
    log.info("dump partitions state for <" + cacheName + ">:\n" + sb.toString());
}
Also used : IgniteKernal(org.apache.ignite.internal.IgniteKernal) GridDhtPartitionTopology(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) GridDhtPartitionFullMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap) GridDhtPartitionMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap) IgniteDhtDemandedPartitionsMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.IgniteDhtDemandedPartitionsMap) GridDhtPartitionState(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState) Ignite(org.apache.ignite.Ignite) GridDhtLocalPartition(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition) UUID(java.util.UUID)
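
A short sketch of a call site, assuming a test class extending GridCommonAbstractTest; the cache name, helper method name and partition count are illustrative assumptions.

import org.apache.ignite.Ignition;
import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;

/** Hypothetical helper showing a typical call to printPartitionState(). */
public abstract class PartitionStateDumpSketch extends GridCommonAbstractTest {
    /** Dumps the state of the first 10 partitions of "myCache" on every node started in this JVM. */
    protected void dumpMyCachePartitions() {
        // Passing 0 as the second argument prints all partitions (see the code above);
        // limiting to 10 keeps the log output readable.
        printPartitionState("myCache", 10, Ignition.allGrids());
    }
}

The dump ends up in the test log under "dump partitions state for <myCache>", matching the log.info call at the end of the method above.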

Aggregations

GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology): 64 usages
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 24 usages
GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition): 21 usages
ClusterNode (org.apache.ignite.cluster.ClusterNode): 20 usages
Map (java.util.Map): 18 usages
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 18 usages
CacheGroupContext (org.apache.ignite.internal.processors.cache.CacheGroupContext): 17 usages
HashMap (java.util.HashMap): 15 usages
ArrayList (java.util.ArrayList): 14 usages
Ignite (org.apache.ignite.Ignite): 14 usages
GridCommonAbstractTest (org.apache.ignite.testframework.junits.common.GridCommonAbstractTest): 12 usages
Test (org.junit.Test): 12 usages
IgniteEx (org.apache.ignite.internal.IgniteEx): 11 usages
UUID (java.util.UUID): 10 usages
IgniteKernal (org.apache.ignite.internal.IgniteKernal): 10 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 9 usages
IgniteException (org.apache.ignite.IgniteException): 9 usages
GridCacheContext (org.apache.ignite.internal.processors.cache.GridCacheContext): 9 usages
GridDhtPartitionMap (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap): 9 usages
HashSet (java.util.HashSet): 8 usages