Example 1 with IntArray

Use of org.h2.util.IntArray in project ignite by apache.

From class GridReduceQueryExecutor, method partitionedUnstableDataNodes.

/**
 * Calculates partition mapping for partitioned cache on unstable topology.
 *
 * @param cacheIds Cache IDs.
 * @return Partition mapping or {@code null} if we can't calculate it due to repartitioning and we need to retry.
 */
@SuppressWarnings("unchecked")
private Map<ClusterNode, IntArray> partitionedUnstableDataNodes(List<Integer> cacheIds) {
    // If the main cache is replicated, just replace it with the first partitioned.
    GridCacheContext<?, ?> cctx = findFirstPartitioned(cacheIds);
    final int partsCnt = cctx.affinity().partitions();
    if (cacheIds.size() > 1) {
        // Check correct number of partitions for partitioned caches.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);
            if (extraCctx.isReplicated() || extraCctx.isLocal())
                continue;
            int parts = extraCctx.affinity().partitions();
            if (parts != partsCnt)
                throw new CacheException("Number of partitions must be the same for correct collocation [cache1=" + cctx.name() + ", parts1=" + partsCnt + ", cache2=" + extraCctx.name() + ", parts2=" + parts + "]");
        }
    }
    Set<ClusterNode>[] partLocs = new Set[partsCnt];
    // Fill partition locations for main cache.
    for (int p = 0; p < partsCnt; p++) {
        List<ClusterNode> owners = cctx.topology().owners(p);
        if (F.isEmpty(owners)) {
            // Handle special case: no mapping is configured for a partition.
            if (F.isEmpty(cctx.affinity().assignment(NONE).get(p))) {
                // Mark unmapped partition.
                partLocs[p] = UNMAPPED_PARTS;
                continue;
            } else if (!F.isEmpty(dataNodes(cctx.groupId(), NONE)))
                // Retry.
                return null;
            throw new CacheException("Failed to find data nodes [cache=" + cctx.name() + ", part=" + p + "]");
        }
        partLocs[p] = new HashSet<>(owners);
    }
    if (cacheIds.size() > 1) {
        // We need this for logical collocation between different partitioned caches with the same affinity.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);
            // This is possible if we have replaced a replicated cache with a partitioned one earlier.
            if (cctx == extraCctx)
                continue;
            if (extraCctx.isReplicated() || extraCctx.isLocal())
                continue;
            for (int p = 0, parts = extraCctx.affinity().partitions(); p < parts; p++) {
                List<ClusterNode> owners = extraCctx.topology().owners(p);
                if (partLocs[p] == UNMAPPED_PARTS)
                    // Skip unmapped partitions.
                    continue;
                if (F.isEmpty(owners)) {
                    if (!F.isEmpty(dataNodes(extraCctx.groupId(), NONE)))
                        // Retry.
                        return null;
                    throw new CacheException("Failed to find data nodes [cache=" + extraCctx.name() + ", part=" + p + "]");
                }
                if (partLocs[p] == null)
                    partLocs[p] = new HashSet<>(owners);
                else {
                    // Intersection of owners.
                    partLocs[p].retainAll(owners);
                    if (partLocs[p].isEmpty())
                        // Intersection is empty -> retry.
                        return null;
                }
            }
        }
        // Filter out nodes where not all the replicated caches are loaded.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);
            if (!extraCctx.isReplicated())
                continue;
            Set<ClusterNode> dataNodes = replicatedUnstableDataNodes(extraCctx);
            if (F.isEmpty(dataNodes))
                // Retry.
                return null;
            for (Set<ClusterNode> partLoc : partLocs) {
                if (partLoc == UNMAPPED_PARTS)
                    // Skip unmapped partition.
                    continue;
                partLoc.retainAll(dataNodes);
                if (partLoc.isEmpty())
                    // Retry.
                    return null;
            }
        }
    }
    // Collect the final partitions mapping.
    Map<ClusterNode, IntArray> res = new HashMap<>();
    // Partitions in each IntArray will be added in ascending order; this is important.
    for (int p = 0; p < partLocs.length; p++) {
        Set<ClusterNode> pl = partLocs[p];
        // Skip unmapped partitions.
        if (pl == UNMAPPED_PARTS)
            continue;
        assert !F.isEmpty(pl) : pl;
        ClusterNode n = pl.size() == 1 ? F.first(pl) : F.rand(pl);
        IntArray parts = res.get(n);
        if (parts == null)
            res.put(n, parts = new IntArray());
        parts.add(p);
    }
    return res;
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), ResultSet (java.sql.ResultSet), Set (java.util.Set), HashSet (java.util.HashSet), CacheException (javax.cache.CacheException), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), IntArray (org.h2.util.IntArray)
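
The returned map pairs each chosen node with an IntArray of the partitions it will serve, sorted ascending. A minimal sketch of how such a mapping might be unpacked on the consumer side; the sendMapRequest call is hypothetical, not part of the Ignite source, and only the add/get/size methods seen in these examples are used:

import java.util.Map;
import org.apache.ignite.cluster.ClusterNode;
import org.h2.util.IntArray;

class PartitionMappingSketch {
    /** Unpacks each node's IntArray into a plain int[] of partition ids. */
    static void dispatch(Map<ClusterNode, IntArray> mapping) {
        for (Map.Entry<ClusterNode, IntArray> e : mapping.entrySet()) {
            IntArray parts = e.getValue();
            int[] partIds = new int[parts.size()];
            for (int i = 0; i < partIds.length; i++)
                partIds[i] = parts.get(i); // already in ascending order (see comment above)
            // sendMapRequest(e.getKey(), partIds); // hypothetical: ship the query fragment
        }
    }
}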

Example 2 with IntArray

Use of org.h2.util.IntArray in project ignite by apache.

From class GridReduceQueryExecutor, method stableDataNodes.

/**
 * @param isReplicatedOnly If we must only have replicated caches.
 * @param topVer Topology version.
 * @param cacheIds Participating cache IDs.
 * @param parts Partitions.
 * @return Data nodes or {@code null} if repartitioning started and we need to retry.
 */
private Map<ClusterNode, IntArray> stableDataNodes(boolean isReplicatedOnly, AffinityTopologyVersion topVer, List<Integer> cacheIds, int[] parts) {
    GridCacheContext<?, ?> cctx = cacheContext(cacheIds.get(0));
    Map<ClusterNode, IntArray> map = stableDataNodesMap(topVer, cctx, parts);
    Set<ClusterNode> nodes = map.keySet();
    if (F.isEmpty(map))
        throw new CacheException("Failed to find data nodes for cache: " + cctx.name());
    for (int i = 1; i < cacheIds.size(); i++) {
        GridCacheContext<?, ?> extraCctx = cacheContext(cacheIds.get(i));
        String extraCacheName = extraCctx.name();
        if (extraCctx.isLocal())
            // No consistency guarantees for local caches.
            continue;
        if (isReplicatedOnly && !extraCctx.isReplicated())
            throw new CacheException("Queries running on replicated cache should not contain JOINs " + "with partitioned tables [replicatedCache=" + cctx.name() + ", partitionedCache=" + extraCacheName + "]");
        Set<ClusterNode> extraNodes = stableDataNodesMap(topVer, extraCctx, parts).keySet();
        if (F.isEmpty(extraNodes))
            throw new CacheException("Failed to find data nodes for cache: " + extraCacheName);
        boolean disjoint;
        if (extraCctx.isReplicated()) {
            if (isReplicatedOnly) {
                nodes.retainAll(extraNodes);
                disjoint = map.isEmpty();
            } else
                disjoint = !extraNodes.containsAll(nodes);
        } else
            disjoint = !extraNodes.equals(nodes);
        if (disjoint) {
            if (isPreloadingActive(cacheIds))
                // Retry.
                return null;
            else
                throw new CacheException("Caches have distinct sets of data nodes [cache1=" + cctx.name() + ", cache2=" + extraCacheName + "]");
        }
    }
    return map;
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), IntArray (org.h2.util.IntArray), CacheException (javax.cache.CacheException)
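
The heart of the method above is a three-way set comparison. A standalone sketch of just that decision, using only java.util sets; the class and method names here are illustrative, not from the Ignite source:

import java.util.Set;

class DisjointCheckSketch {
    /** True when the extra cache's node set rules out collocated execution. */
    static <N> boolean disjoint(Set<N> nodes, Set<N> extraNodes,
                                boolean extraIsReplicated, boolean replicatedOnly) {
        if (extraIsReplicated) {
            if (replicatedOnly) {
                // Replicated-only query: shrink to the intersection of node sets.
                nodes.retainAll(extraNodes);
                return nodes.isEmpty();
            }
            // Mixed query: every node running the query must also host the replicated cache.
            return !extraNodes.containsAll(nodes);
        }
        // Two partitioned caches must live on exactly the same nodes.
        return !extraNodes.equals(nodes);
    }
}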

Example 3 with IntArray

Use of org.h2.util.IntArray in project h2database by h2database.

From class PageLog, method removeUntil.

/**
 * Remove all pages until the given data page.
 *
 * @param trunkPage the first trunk page
 * @param firstDataPageToKeep the first data page to keep
 * @return the trunk page of the data page to keep
 */
private int removeUntil(int trunkPage, int firstDataPageToKeep) {
    trace.debug("log.removeUntil " + trunkPage + " " + firstDataPageToKeep);
    int last = trunkPage;
    while (true) {
        Page p = store.getPage(trunkPage);
        PageStreamTrunk t = (PageStreamTrunk) p;
        if (t == null) {
            throw DbException.throwInternalError("log.removeUntil not found: " + firstDataPageToKeep + " last " + last);
        }
        logKey = t.getLogKey();
        last = t.getPos();
        if (t.contains(firstDataPageToKeep)) {
            return last;
        }
        trunkPage = t.getNextTrunk();
        IntArray list = new IntArray();
        list.add(t.getPos());
        for (int i = 0; ; i++) {
            int next = t.getPageData(i);
            if (next == -1) {
                break;
            }
            list.add(next);
        }
        freeLogPages(list);
        pageOut.free(t);
    }
}
Also used: IntArray (org.h2.util.IntArray)
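
removeUntil illustrates the collect-then-process pattern IntArray is typically used for: accumulate an unknown number of page ids, then hand the whole batch to freeLogPages. A minimal sketch of the same pattern in isolation; the -1 sentinel mirrors PageStreamTrunk.getPageData, and the println stands in for the real batch consumer:

import org.h2.util.IntArray;

class CollectThenFreeSketch {
    static void run(int[] pageIds) {
        IntArray batch = new IntArray();
        for (int id : pageIds) {
            if (id == -1)
                break; // end-of-list sentinel, as in PageStreamTrunk.getPageData
            batch.add(id);
        }
        // In PageLog the whole batch would go to freeLogPages(batch).
        for (int i = 0; i < batch.size(); i++)
            System.out.println("free page " + batch.get(i));
    }
}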

Example 4 with IntArray

Use of org.h2.util.IntArray in project h2database by h2database.

From class TestIntArray, method testRandom.

private void testRandom() {
    IntArray array = new IntArray();
    int[] test = {};
    Random random = new Random(1);
    for (int i = 0; i < 10000; i++) {
        int idx = test.length == 0 ? 0 : random.nextInt(test.length);
        int v = random.nextInt(100);
        int op = random.nextInt(4);
        switch (op) {
            case 0:
                array.add(v);
                test = add(test, v);
                break;
            case 1:
                if (test.length > idx) {
                    assertEquals(get(test, idx), array.get(idx));
                }
                break;
            case 2:
                if (test.length > 0) {
                    array.remove(idx);
                    test = remove(test, idx);
                }
                break;
            case 3:
                assertEquals(test.length, array.size());
                break;
            default:
        }
        assertEquals(test.length, array.size());
        for (int j = 0; j < test.length; j++) {
            assertEquals(test[j], array.get(j));
        }
    }
}
Also used: IntArray (org.h2.util.IntArray), Random (java.util.Random)
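
testRandom validates IntArray against a plain int[] oracle through helpers add, remove, and get that are defined elsewhere in TestIntArray. A plausible reconstruction of those reference helpers, sketched here rather than copied from the H2 source:

class IntArrayOracleHelpers {
    /** Returns a copy of array with value appended. */
    static int[] add(int[] array, int value) {
        int[] result = new int[array.length + 1];
        System.arraycopy(array, 0, result, 0, array.length);
        result[array.length] = value;
        return result;
    }

    /** Returns a copy of array with the element at index removed. */
    static int[] remove(int[] array, int index) {
        int[] result = new int[array.length - 1];
        System.arraycopy(array, 0, result, 0, index);
        System.arraycopy(array, index + 1, result, index, array.length - index - 1);
        return result;
    }

    /** Plain index access, mirroring IntArray.get. */
    static int get(int[] array, int index) {
        return array[index];
    }
}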

Example 5 with IntArray

Use of org.h2.util.IntArray in project h2database by h2database.

From class TestIntArray, method testInit.

private static void testInit() {
    IntArray array = new IntArray(new int[0]);
    array.add(10);
}
Also used: IntArray (org.h2.util.IntArray)
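
testInit covers the second constructor, which wraps an existing int[] and then grows past it on demand. A short sketch contrasting the two construction paths seen in these examples:

import org.h2.util.IntArray;

class InitSketch {
    static void demo() {
        IntArray empty = new IntArray(); // starts empty, grows as values are added
        empty.add(1);

        IntArray wrapped = new IntArray(new int[] { 10, 20, 30 }); // size() is 3 up front
        wrapped.add(40); // grows beyond the initial backing array
        System.out.println(wrapped.size()); // prints 4
    }
}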

Aggregations

IntArray (org.h2.util.IntArray): 17
ClusterNode (org.apache.ignite.cluster.ClusterNode): 11
HashMap (java.util.HashMap): 7
CacheException (javax.cache.CacheException): 6
LinkedHashMap (java.util.LinkedHashMap): 4
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 4
List (java.util.List): 3
ResultSet (java.sql.ResultSet): 2
ArrayList (java.util.ArrayList): 2
Collections.singletonList (java.util.Collections.singletonList): 2
HashSet (java.util.HashSet): 2
Map (java.util.Map): 2
Set (java.util.Set): 2
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 2
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 2
GridQueryCancelRequest (org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryCancelRequest): 2
GridIntList (org.apache.ignite.internal.util.GridIntList): 2
BitSet (java.util.BitSet): 1
Random (java.util.Random): 1
UUID (java.util.UUID): 1