Example 1 with IntArray

Use of org.h2.util.IntArray in project ignite by apache.

From the class GridReduceQueryExecutor, the method partitionedUnstableDataNodes:

/**
 * Calculates partition mapping for partitioned cache on unstable topology.
 *
 * @param cacheIds Cache IDs.
 * @return Partition mapping or {@code null} if we can't calculate it due to repartitioning and we need to retry.
 */
@SuppressWarnings("unchecked")
private Map<ClusterNode, IntArray> partitionedUnstableDataNodes(List<Integer> cacheIds) {
    // If the main cache is replicated, just replace it with the first partitioned.
    GridCacheContext<?, ?> cctx = findFirstPartitioned(cacheIds);
    final int partsCnt = cctx.affinity().partitions();
    if (cacheIds.size() > 1) {
        // Check correct number of partitions for partitioned caches.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);
            if (extraCctx.isReplicated() || extraCctx.isLocal())
                continue;
            int parts = extraCctx.affinity().partitions();
            if (parts != partsCnt)
                throw new CacheException("Number of partitions must be the same for correct collocation [cache1=" + cctx.name() + ", parts1=" + partsCnt + ", cache2=" + extraCctx.name() + ", parts2=" + parts + "]");
        }
    }
    Set<ClusterNode>[] partLocs = new Set[partsCnt];
    // Fill partition locations for main cache.
    for (int p = 0; p < partsCnt; p++) {
        List<ClusterNode> owners = cctx.topology().owners(p);
        if (F.isEmpty(owners)) {
            // Handle special case: no mapping is configured for a partition.
            if (F.isEmpty(cctx.affinity().assignment(NONE).get(p))) {
                // Mark unmapped partition.
                partLocs[p] = UNMAPPED_PARTS;
                continue;
            } else if (!F.isEmpty(dataNodes(cctx.groupId(), NONE)))
                // Retry.
                return null;
            throw new CacheException("Failed to find data nodes [cache=" + cctx.name() + ", part=" + p + "]");
        }
        partLocs[p] = new HashSet<>(owners);
    }
    if (cacheIds.size() > 1) {
        // We need this for logical collocation between different partitioned caches with the same affinity.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);
            // This is possible if we have replaced a replicated cache with a partitioned one earlier.
            if (cctx == extraCctx)
                continue;
            if (extraCctx.isReplicated() || extraCctx.isLocal())
                continue;
            for (int p = 0, parts = extraCctx.affinity().partitions(); p < parts; p++) {
                List<ClusterNode> owners = extraCctx.topology().owners(p);
                if (partLocs[p] == UNMAPPED_PARTS)
                    // Skip unmapped partitions.
                    continue;
                if (F.isEmpty(owners)) {
                    if (!F.isEmpty(dataNodes(extraCctx.groupId(), NONE)))
                        // Retry.
                        return null;
                    throw new CacheException("Failed to find data nodes [cache=" + extraCctx.name() + ", part=" + p + "]");
                }
                if (partLocs[p] == null)
                    partLocs[p] = new HashSet<>(owners);
                else {
                    // Intersection of owners.
                    partLocs[p].retainAll(owners);
                    if (partLocs[p].isEmpty())
                        // Intersection is empty -> retry.
                        return null;
                }
            }
        }
        // Filter nodes where not all the replicated caches loaded.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);
            if (!extraCctx.isReplicated())
                continue;
            Set<ClusterNode> dataNodes = replicatedUnstableDataNodes(extraCctx);
            if (F.isEmpty(dataNodes))
                // Retry.
                return null;
            for (Set<ClusterNode> partLoc : partLocs) {
                if (partLoc == UNMAPPED_PARTS)
                    // Skip unmapped partition.
                    continue;
                partLoc.retainAll(dataNodes);
                if (partLoc.isEmpty())
                    // Retry.
                    return null;
            }
        }
    }
    // Collect the final partitions mapping.
    Map<ClusterNode, IntArray> res = new HashMap<>();
    // Partitions in all IntArrays will be sorted in ascending order; this is important.
    for (int p = 0; p < partLocs.length; p++) {
        Set<ClusterNode> pl = partLocs[p];
        // Skip unmapped partitions.
        if (pl == UNMAPPED_PARTS)
            continue;
        assert !F.isEmpty(pl) : pl;
        ClusterNode n = pl.size() == 1 ? F.first(pl) : F.rand(pl);
        IntArray parts = res.get(n);
        if (parts == null)
            res.put(n, parts = new IntArray());
        parts.add(p);
    }
    return res;
}
Also used: ClusterNode(org.apache.ignite.cluster.ClusterNode) ResultSet(java.sql.ResultSet) Set(java.util.Set) HashSet(java.util.HashSet) CacheException(javax.cache.CacheException) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) IntArray(org.h2.util.IntArray)
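
The core of the mapping above is the owner-intersection rule: a partition may only be assigned to a node that owns it in every participating cache, and an empty intersection means the topology is unstable, so the caller retries. Below is a minimal, self-contained sketch of that rule, using plain String node names as stand-ins for ClusterNode:

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class OwnerIntersectionSketch {
    public static void main(String[] args) {
        // Owners of partition 0 in the main cache and in one extra cache.
        Set<String> mainOwners = new HashSet<>(Arrays.asList("nodeA", "nodeB"));
        List<String> extraOwners = Arrays.asList("nodeB", "nodeC");

        // Same idea as partLocs[p].retainAll(owners) in the method above.
        Set<String> partLoc = new HashSet<>(mainOwners);
        partLoc.retainAll(extraOwners);

        if (partLoc.isEmpty())
            System.out.println("Intersection is empty -> retry the mapping");
        else
            System.out.println("Partition 0 can be mapped to: " + partLoc); // [nodeB]
    }
}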

Example 2 with IntArray

Use of org.h2.util.IntArray in project ignite by apache.

From the class GridReduceQueryExecutor, the method stableDataNodes:

/**
 * @param isReplicatedOnly If we must only have replicated caches.
 * @param topVer Topology version.
 * @param cacheIds Participating cache IDs.
 * @param parts Partitions.
 * @return Data nodes or {@code null} if repartitioning started and we need to retry.
 */
private Map<ClusterNode, IntArray> stableDataNodes(boolean isReplicatedOnly, AffinityTopologyVersion topVer, List<Integer> cacheIds, int[] parts) {
    GridCacheContext<?, ?> cctx = cacheContext(cacheIds.get(0));
    Map<ClusterNode, IntArray> map = stableDataNodesMap(topVer, cctx, parts);
    Set<ClusterNode> nodes = map.keySet();
    if (F.isEmpty(map))
        throw new CacheException("Failed to find data nodes for cache: " + cctx.name());
    for (int i = 1; i < cacheIds.size(); i++) {
        GridCacheContext<?, ?> extraCctx = cacheContext(cacheIds.get(i));
        String extraCacheName = extraCctx.name();
        if (extraCctx.isLocal())
            // No consistency guarantees for local caches.
            continue;
        if (isReplicatedOnly && !extraCctx.isReplicated())
            throw new CacheException("Queries running on replicated cache should not contain JOINs " + "with partitioned tables [replicatedCache=" + cctx.name() + ", partitionedCache=" + extraCacheName + "]");
        Set<ClusterNode> extraNodes = stableDataNodesMap(topVer, extraCctx, parts).keySet();
        if (F.isEmpty(extraNodes))
            throw new CacheException("Failed to find data nodes for cache: " + extraCacheName);
        boolean disjoint;
        if (extraCctx.isReplicated()) {
            if (isReplicatedOnly) {
                nodes.retainAll(extraNodes);
                disjoint = map.isEmpty();
            } else
                disjoint = !extraNodes.containsAll(nodes);
        } else
            disjoint = !extraNodes.equals(nodes);
        if (disjoint) {
            if (isPreloadingActive(cacheIds))
                // Retry.
                return null;
            else
                throw new CacheException("Caches have distinct sets of data nodes [cache1=" + cctx.name() + ", cache2=" + extraCacheName + "]");
        }
    }
    return map;
}
Also used: ClusterNode(org.apache.ignite.cluster.ClusterNode) IntArray(org.h2.util.IntArray) CacheException(javax.cache.CacheException)
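
The disjointness test in stableDataNodes treats replicated and partitioned extra caches differently: a replicated extra cache merely has to be present on every node already selected (containsAll), while a partitioned extra cache must live on exactly the same node set (equals). A small sketch of that rule, with hypothetical String node ids in place of ClusterNode:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class DisjointCheckSketch {
    // Mirrors the check above: replicated -> a superset is enough,
    // partitioned -> node sets must match exactly.
    static boolean disjoint(Set<String> nodes, Set<String> extraNodes, boolean extraIsReplicated) {
        return extraIsReplicated ? !extraNodes.containsAll(nodes) : !extraNodes.equals(nodes);
    }

    public static void main(String[] args) {
        Set<String> nodes = new HashSet<>(Arrays.asList("n1", "n2"));
        Set<String> replicatedOn = new HashSet<>(Arrays.asList("n1", "n2", "n3"));
        Set<String> partitionedOn = new HashSet<>(Arrays.asList("n1", "n3"));

        System.out.println(disjoint(nodes, replicatedOn, true));   // false: replica hosted everywhere needed
        System.out.println(disjoint(nodes, partitionedOn, false)); // true: distinct data nodes -> retry or fail
    }
}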

Example 3 with IntArray

Use of org.h2.util.IntArray in project ignite by apache.

From the class GridReduceQueryExecutor, the method createNodeToSegmentsCountMapping:

/**
 * Creates a mapping of node -> expected segments to scan on this particular node.
 *
 * @param qry Query to create mapping for.
 * @param mapping Nodes to partition mapping.
 * @return Mapping of node to segments.
 */
private Map<ClusterNode, Integer> createNodeToSegmentsCountMapping(GridCacheTwoStepQuery qry, ReducePartitionMapResult mapping) {
    Map<ClusterNode, Integer> res = new HashMap<>();
    Collection<ClusterNode> nodes = mapping.nodes();
    if (qry.explain() || qry.isReplicatedOnly()) {
        for (ClusterNode node : nodes) {
            Integer prev = res.put(node, 1);
            assert prev == null;
        }
        return res;
    }
    final int segments = mapper.findFirstPartitioned(qry.cacheIds()).config().getQueryParallelism();
    for (ClusterNode node : nodes) {
        Map<ClusterNode, IntArray> partsMapping = mapping.queryPartitionsMap();
        if (partsMapping != null) {
            BitSet bs = new BitSet(segments);
            IntArray parts = partsMapping.get(node);
            for (int i = 0; i < parts.size(); i++) bs.set(InlineIndexImpl.calculateSegment(segments, parts.get(i)));
            Integer prev = res.put(node, bs.cardinality());
            assert prev == null;
        } else
            res.put(node, segments);
    }
    return res;
}
Also used: ClusterNode(org.apache.ignite.cluster.ClusterNode) IntArray(org.h2.util.IntArray) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) BitSet(java.util.BitSet)
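
The mapping above sets one bit per index segment that a node's partitions touch and then counts the bits, so a node whose partitions share segments is charged once per segment rather than once per partition. The sketch below illustrates the idea; the modulo mapping is an assumption standing in for InlineIndexImpl.calculateSegment, whose actual partition-to-segment function is not shown here:

import java.util.BitSet;

public class SegmentCountSketch {
    // Hypothetical stand-in for InlineIndexImpl.calculateSegment(segments, partition).
    static int calculateSegment(int segments, int partition) {
        return partition % segments;
    }

    public static void main(String[] args) {
        int segments = 4;                  // query parallelism of the cache
        int[] partsForNode = {0, 4, 8, 5}; // partitions mapped to one node

        // Set a bit per touched segment, then count the bits.
        BitSet bs = new BitSet(segments);
        for (int p : partsForNode)
            bs.set(calculateSegment(segments, p));

        // Partitions 0, 4, 8 land in segment 0 and partition 5 in segment 1.
        System.out.println("Expected segments to scan: " + bs.cardinality()); // 2
    }
}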

Example 4 with IntArray

Use of org.h2.util.IntArray in project ignite by apache.

From the class GridReduceQueryExecutor, the method update:

/**
 * @param schemaName Schema name.
 * @param cacheIds Cache ids.
 * @param selectQry Select query.
 * @param params SQL parameters.
 * @param enforceJoinOrder Enforce join order of tables.
 * @param pageSize Page size.
 * @param timeoutMillis Timeout.
 * @param parts Partitions.
 * @param isReplicatedOnly Whether query uses only replicated caches.
 * @param cancel Cancel state.
 * @return Update result, or {@code null} when some map node doesn't support distributed DML.
 */
@SuppressWarnings("IfMayBeConditional")
public UpdateResult update(String schemaName, List<Integer> cacheIds, String selectQry, Object[] params, boolean enforceJoinOrder, int pageSize, int timeoutMillis, final int[] parts, boolean isReplicatedOnly, GridQueryCancel cancel) {
    AffinityTopologyVersion topVer = h2.readyTopologyVersion();
    ReducePartitionMapResult nodesParts = mapper.nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly);
    Collection<ClusterNode> nodes = nodesParts.nodes();
    if (F.isEmpty(nodes))
        throw new CacheException("Failed to determine nodes participating in the update. " + "Explanation (Retry update once topology recovers).");
    if (isReplicatedOnly) {
        ClusterNode locNode = ctx.discovery().localNode();
        if (nodes.contains(locNode))
            nodes = singletonList(locNode);
        else
            nodes = singletonList(F.rand(nodes));
    }
    for (ClusterNode n : nodes) {
        if (!n.version().greaterThanEqual(2, 3, 0)) {
            log.warning("Server-side DML optimization is skipped because map node does not support it. " + "Falling back to normal DML. [node=" + n.id() + ", v=" + n.version() + "].");
            return null;
        }
    }
    final long reqId = qryReqIdGen.incrementAndGet();
    h2.runningQueryManager().trackRequestId(reqId);
    final DmlDistributedUpdateRun r = new DmlDistributedUpdateRun(nodes.size());
    int flags = enforceJoinOrder ? GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER : 0;
    if (isReplicatedOnly)
        flags |= GridH2QueryRequest.FLAG_REPLICATED;
    GridH2DmlRequest req = new GridH2DmlRequest().requestId(reqId).topologyVersion(topVer).caches(cacheIds).schemaName(schemaName).query(selectQry).pageSize(pageSize).parameters(params).timeout(timeoutMillis).explicitTimeout(true).flags(flags);
    updRuns.put(reqId, r);
    boolean release = false;
    try {
        Map<ClusterNode, IntArray> partsMap = (nodesParts.queryPartitionsMap() != null) ? nodesParts.queryPartitionsMap() : nodesParts.partitionsMap();
        ReducePartitionsSpecializer partsSpec = (parts == null) ? null : new ReducePartitionsSpecializer(partsMap);
        final Collection<ClusterNode> finalNodes = nodes;
        cancel.add(() -> {
            r.future().onCancelled();
            send(finalNodes, new GridQueryCancelRequest(reqId), null, true);
        });
        // send() logs the debug message
        if (send(nodes, req, partsSpec, false))
            return r.future().get();
        throw new CacheException("Failed to send update request to participating nodes.");
    } catch (IgniteCheckedException | RuntimeException e) {
        release = true;
        U.error(log, "Error during update [localNodeId=" + ctx.localNodeId() + "]", e);
        throw new CacheException("Failed to run SQL update query. " + e.getMessage(), e);
    } finally {
        if (release)
            send(nodes, new GridQueryCancelRequest(reqId), null, false);
        if (!updRuns.remove(reqId, r))
            U.warn(log, "Update run was already removed: " + reqId);
    }
}
Also used: ClusterNode(org.apache.ignite.cluster.ClusterNode) GridH2DmlRequest(org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlRequest) GridQueryCancelRequest(org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryCancelRequest) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) CacheException(javax.cache.CacheException) DmlDistributedUpdateRun(org.apache.ignite.internal.processors.query.h2.dml.DmlDistributedUpdateRun) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IntArray(org.h2.util.IntArray)
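
For a replicated-only update every data node holds a full copy, so update() collapses the node set to a single target: the local node if it participates, otherwise a random one. A minimal sketch of that selection, with String ids standing in for ClusterNode and java.util.Random in place of F.rand:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Random;

import static java.util.Collections.singletonList;

public class ReplicatedNodePickSketch {
    public static void main(String[] args) {
        List<String> nodes = new ArrayList<>(Arrays.asList("n1", "n2", "n3"));
        String locNode = "n2"; // stand-in for ctx.discovery().localNode()

        // Prefer the local node; fall back to a random participant.
        Collection<String> target = nodes.contains(locNode)
            ? singletonList(locNode)
            : singletonList(nodes.get(new Random().nextInt(nodes.size())));

        System.out.println("Send the update to: " + target); // [n2]
    }
}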

Example 5 with IntArray

Use of org.h2.util.IntArray in project h2database by h2database.

From the class PageLog, the method removeUntil:

/**
 * Remove all pages until the given data page.
 *
 * @param trunkPage the first trunk page
 * @param firstDataPageToKeep the first data page to keep
 * @return the trunk page of the data page to keep
 */
private int removeUntil(int trunkPage, int firstDataPageToKeep) {
    trace.debug("log.removeUntil " + trunkPage + " " + firstDataPageToKeep);
    int last = trunkPage;
    while (true) {
        Page p = store.getPage(trunkPage);
        PageStreamTrunk t = (PageStreamTrunk) p;
        if (t == null) {
            throw DbException.throwInternalError("log.removeUntil not found: " + firstDataPageToKeep + " last " + last);
        }
        logKey = t.getLogKey();
        last = t.getPos();
        if (t.contains(firstDataPageToKeep)) {
            return last;
        }
        trunkPage = t.getNextTrunk();
        IntArray list = new IntArray();
        list.add(t.getPos());
        for (int i = 0; ; i++) {
            int next = t.getPageData(i);
            if (next == -1) {
                break;
            }
            list.add(next);
        }
        freeLogPages(list);
        pageOut.free(t);
    }
}
Also used: IntArray(org.h2.util.IntArray)
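
Both projects reach for IntArray because it stores primitive ints without Integer boxing while growing like an ArrayList, which suits collecting page and partition numbers in bulk. The sketch below is an illustrative re-implementation of the small surface used above (add/get/size), not the actual org.h2.util.IntArray source:

import java.util.Arrays;

public class IntArraySketch {
    private int[] data = new int[4];
    private int size;

    public void add(int value) {
        if (size == data.length)
            data = Arrays.copyOf(data, data.length * 2); // grow geometrically
        data[size++] = value;
    }

    public int get(int index) {
        if (index >= size)
            throw new ArrayIndexOutOfBoundsException(index);
        return data[index];
    }

    public int size() {
        return size;
    }

    public static void main(String[] args) {
        IntArraySketch list = new IntArraySketch();
        // Collect page ids the way removeUntil() collects trunk and data pages.
        for (int page : new int[] {17, 18, 19})
            list.add(page);
        System.out.println(list.size() + " pages, first = " + list.get(0)); // 3 pages, first = 17
    }
}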

Aggregations

IntArray (org.h2.util.IntArray): 22 usages
IntArray (com.android.launcher3.util.IntArray): 13 usages
ClusterNode (org.apache.ignite.cluster.ClusterNode): 11 usages
ArrayList (java.util.ArrayList): 10 usages
HashMap (java.util.HashMap): 7 usages
CacheException (javax.cache.CacheException): 6 usages
ItemInfo (com.android.launcher3.model.data.ItemInfo): 5 usages
Point (android.graphics.Point): 4 usages
WorkspaceItemInfo (com.android.launcher3.model.data.WorkspaceItemInfo): 4 usages
LinkedHashMap (java.util.LinkedHashMap): 4 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 4 usages
LauncherAppWidgetInfo (com.android.launcher3.model.data.LauncherAppWidgetInfo): 3 usages
List (java.util.List): 3 usages
Parameter (org.h2.expression.Parameter): 3 usages
Column (org.h2.table.Column): 3 usages
IndexColumn (org.h2.table.IndexColumn): 3 usages
Intent (android.content.Intent): 2 usages
SessionInfo (android.content.pm.PackageInstaller.SessionInfo): 2 usages
Cursor (android.database.Cursor): 2 usages
CellLayout (com.android.launcher3.CellLayout): 2 usages