
Example 6 with IntArray

Use of org.h2.util.IntArray in project h2database by h2database.

The class TestIntArray, method testRemoveRange:

private void testRemoveRange() {
    IntArray array = new IntArray(new int[] { 1, 2, 3, 4, 5 });
    array.removeRange(1, 3);
    assertEquals(3, array.size());
    assertEquals(1, array.get(0));
    assertEquals(4, array.get(1));
    assertEquals(5, array.get(2));
}
Also used : IntArray(org.h2.util.IntArray)
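
For reference, a minimal standalone sketch of the same call, assuming the H2 jar (org.h2.util.IntArray) is on the classpath. It illustrates that removeRange(from, to) drops the half-open index range [from, to), which is why indices 1 and 2 (values 2 and 3) disappear in the test above.

import org.h2.util.IntArray;

public class IntArrayRemoveRangeDemo {
    public static void main(String[] args) {
        IntArray array = new IntArray(new int[] { 1, 2, 3, 4, 5 });
        // Removes the elements at indices 1 and 2; the upper bound 3 is exclusive.
        array.removeRange(1, 3);
        for (int i = 0; i < array.size(); i++)
            System.out.println(array.get(i)); // prints 1, 4, 5
    }
}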

Example 7 with IntArray

Use of org.h2.util.IntArray in project ignite by apache.

The class GridReduceQueryExecutor, method createNodeToSegmentsCountMapping:

/**
 * Creates a mapping of node -> expected segments to scan on this particular node.
 *
 * @param qry Query to create mapping for.
 * @param mapping Nodes to partition mapping.
 * @return Mapping of node to segments.
 */
private Map<ClusterNode, Integer> createNodeToSegmentsCountMapping(GridCacheTwoStepQuery qry, ReducePartitionMapResult mapping) {
    Map<ClusterNode, Integer> res = new HashMap<>();
    Collection<ClusterNode> nodes = mapping.nodes();
    if (qry.explain() || qry.isReplicatedOnly()) {
        for (ClusterNode node : nodes) {
            Integer prev = res.put(node, 1);
            assert prev == null;
        }
        return res;
    }
    final int segments = mapper.findFirstPartitioned(qry.cacheIds()).config().getQueryParallelism();
    for (ClusterNode node : nodes) {
        Map<ClusterNode, IntArray> partsMapping = mapping.queryPartitionsMap();
        if (partsMapping != null) {
            BitSet bs = new BitSet(segments);
            IntArray parts = partsMapping.get(node);
            for (int i = 0; i < parts.size(); i++)
                bs.set(InlineIndexImpl.calculateSegment(segments, parts.get(i)));
            Integer prev = res.put(node, bs.cardinality());
            assert prev == null;
        } else
            res.put(node, segments);
    }
    return res;
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) IntArray(org.h2.util.IntArray) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) BitSet(java.util.BitSet)
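
A simplified sketch of the per-node segment counting above, detached from Ignite's internals. It assumes the common partition-modulo-segment-count mapping (part % segments); the real code delegates to InlineIndexImpl.calculateSegment(...), so treat the mapping here as illustrative only.

import java.util.BitSet;

public class SegmentCountSketch {
    /** Counts how many distinct index segments a set of partitions touches. */
    static int segmentsToScan(int[] partitions, int segments) {
        BitSet bs = new BitSet(segments);
        for (int part : partitions)
            bs.set(part % segments); // assumed mapping, see note above
        return bs.cardinality();
    }

    public static void main(String[] args) {
        // With 4 segments, partitions 0, 4 and 8 land in segment 0 and partition 5 in segment 1.
        System.out.println(segmentsToScan(new int[] { 0, 4, 8, 5 }, 4)); // prints 2
    }
}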

Example 8 with IntArray

Use of org.h2.util.IntArray in project ignite by apache.

The class GridReduceQueryExecutor, method update:

/**
 * @param schemaName Schema name.
 * @param cacheIds Cache ids.
 * @param selectQry Select query.
 * @param params SQL parameters.
 * @param enforceJoinOrder Enforce join order of tables.
 * @param pageSize Page size.
 * @param timeoutMillis Timeout.
 * @param parts Partitions.
 * @param isReplicatedOnly Whether query uses only replicated caches.
 * @param cancel Cancel state.
 * @return Update result, or {@code null} when some map node doesn't support distributed DML.
 */
@SuppressWarnings("IfMayBeConditional")
public UpdateResult update(String schemaName, List<Integer> cacheIds, String selectQry, Object[] params, boolean enforceJoinOrder, int pageSize, int timeoutMillis, final int[] parts, boolean isReplicatedOnly, GridQueryCancel cancel) {
    AffinityTopologyVersion topVer = h2.readyTopologyVersion();
    ReducePartitionMapResult nodesParts = mapper.nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly);
    Collection<ClusterNode> nodes = nodesParts.nodes();
    if (F.isEmpty(nodes))
        throw new CacheException("Failed to determine nodes participating in the update. " + "Explanation (Retry update once topology recovers).");
    if (isReplicatedOnly) {
        ClusterNode locNode = ctx.discovery().localNode();
        if (nodes.contains(locNode))
            nodes = singletonList(locNode);
        else
            nodes = singletonList(F.rand(nodes));
    }
    for (ClusterNode n : nodes) {
        if (!n.version().greaterThanEqual(2, 3, 0)) {
            log.warning("Server-side DML optimization is skipped because map node does not support it. " + "Falling back to normal DML. [node=" + n.id() + ", v=" + n.version() + "].");
            return null;
        }
    }
    final long reqId = qryReqIdGen.incrementAndGet();
    h2.runningQueryManager().trackRequestId(reqId);
    final DmlDistributedUpdateRun r = new DmlDistributedUpdateRun(nodes.size());
    int flags = enforceJoinOrder ? GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER : 0;
    if (isReplicatedOnly)
        flags |= GridH2QueryRequest.FLAG_REPLICATED;
    GridH2DmlRequest req = new GridH2DmlRequest()
        .requestId(reqId)
        .topologyVersion(topVer)
        .caches(cacheIds)
        .schemaName(schemaName)
        .query(selectQry)
        .pageSize(pageSize)
        .parameters(params)
        .timeout(timeoutMillis)
        .explicitTimeout(true)
        .flags(flags);
    updRuns.put(reqId, r);
    boolean release = false;
    try {
        Map<ClusterNode, IntArray> partsMap = (nodesParts.queryPartitionsMap() != null) ? nodesParts.queryPartitionsMap() : nodesParts.partitionsMap();
        ReducePartitionsSpecializer partsSpec = (parts == null) ? null : new ReducePartitionsSpecializer(partsMap);
        final Collection<ClusterNode> finalNodes = nodes;
        cancel.add(() -> {
            r.future().onCancelled();
            send(finalNodes, new GridQueryCancelRequest(reqId), null, true);
        });
        // send() logs the debug message
        if (send(nodes, req, partsSpec, false))
            return r.future().get();
        throw new CacheException("Failed to send update request to participating nodes.");
    } catch (IgniteCheckedException | RuntimeException e) {
        release = true;
        U.error(log, "Error during update [localNodeId=" + ctx.localNodeId() + "]", e);
        throw new CacheException("Failed to run SQL update query. " + e.getMessage(), e);
    } finally {
        if (release)
            send(nodes, new GridQueryCancelRequest(reqId), null, false);
        if (!updRuns.remove(reqId, r))
            U.warn(log, "Update run was already removed: " + reqId);
    }
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) GridH2DmlRequest(org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlRequest) GridQueryCancelRequest(org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryCancelRequest) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) CacheException(javax.cache.CacheException) DmlDistributedUpdateRun(org.apache.ignite.internal.processors.query.h2.dml.DmlDistributedUpdateRun) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IntArray(org.h2.util.IntArray)
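
The bookkeeping around reqId, updRuns and the cancel closure is the part that is easy to get wrong, so here is a stripped-down sketch of that pattern with hypothetical stand-ins (REQ_ID_GEN, RUNS, sendCancelToNodes, sendRequestAndWait) for Ignite's internals: register the run, send the request, and broadcast a cancel in the finally block only if the request failed.

import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public class DistributedUpdateSketch {
    static final AtomicLong REQ_ID_GEN = new AtomicLong(); // plays the role of qryReqIdGen
    static final ConcurrentMap<Long, CompletableFuture<Integer>> RUNS = new ConcurrentHashMap<>();

    static int runUpdate(Runnable sendCancelToNodes, Callable<Integer> sendRequestAndWait) throws Exception {
        long reqId = REQ_ID_GEN.incrementAndGet();
        CompletableFuture<Integer> run = new CompletableFuture<>();
        RUNS.put(reqId, run); // mirrors updRuns.put(reqId, r)
        boolean release = false;
        try {
            return sendRequestAndWait.call(); // like send(nodes, req, ...) followed by r.future().get()
        } catch (Exception e) {
            release = true; // only cancel remote work when something went wrong
            throw e;
        } finally {
            if (release)
                sendCancelToNodes.run(); // like broadcasting GridQueryCancelRequest
            RUNS.remove(reqId, run); // mirrors updRuns.remove(reqId, r)
        }
    }

    public static void main(String[] args) throws Exception {
        int updated = runUpdate(
            () -> System.out.println("cancel broadcast"), // only runs on failure
            () -> 42);                                    // stands in for the reduce future result
        System.out.println("updated rows: " + updated);
    }
}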

Example 9 with IntArray

Use of org.h2.util.IntArray in project ignite by apache.

The class GridReduceQueryExecutor, method stableDataNodesMap:

/**
 * @param topVer Topology version.
 * @param cctx Cache context.
 * @param parts Partitions.
 * @return Mapping of primary nodes to partition IDs; a {@code null} value means no explicit partition filter for that node.
 */
private Map<ClusterNode, IntArray> stableDataNodesMap(AffinityTopologyVersion topVer, final GridCacheContext<?, ?> cctx, @Nullable final int[] parts) {
    Map<ClusterNode, IntArray> mapping = new HashMap<>();
    // Explicit partitions mapping is not applicable to replicated cache.
    if (cctx.isReplicated()) {
        for (ClusterNode clusterNode : cctx.affinity().assignment(topVer).nodes())
            mapping.put(clusterNode, null);
        return mapping;
    }
    List<List<ClusterNode>> assignment = cctx.affinity().assignment(topVer).assignment();
    boolean needPartsFilter = parts != null;
    GridIntIterator iter = needPartsFilter ? new GridIntList(parts).iterator() : U.forRange(0, cctx.affinity().partitions());
    while (iter.hasNext()) {
        int partId = iter.next();
        List<ClusterNode> partNodes = assignment.get(partId);
        if (!partNodes.isEmpty()) {
            ClusterNode prim = partNodes.get(0);
            if (!needPartsFilter) {
                mapping.put(prim, null);
                continue;
            }
            IntArray partIds = mapping.get(prim);
            if (partIds == null) {
                partIds = new IntArray();
                mapping.put(prim, partIds);
            }
            partIds.add(partId);
        }
    }
    return mapping;
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) IntArray(org.h2.util.IntArray) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) GridIntIterator(org.apache.ignite.internal.util.GridIntIterator) Collections.singletonList(java.util.Collections.singletonList) GridIntList(org.apache.ignite.internal.util.GridIntList) List(java.util.List) ArrayList(java.util.ArrayList) GridIntList(org.apache.ignite.internal.util.GridIntList)
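
A plain-Java sketch of the grouping step above: for each requested partition, take its primary owner (the first node in the affinity assignment) and collect the partition IDs per primary. Strings stand in for ClusterNode and List<Integer> for org.h2.util.IntArray, so this illustrates only the logic, not Ignite's API.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class PrimaryPartitionGrouping {
    static Map<String, List<Integer>> groupByPrimary(List<List<String>> assignment, int[] parts) {
        Map<String, List<Integer>> mapping = new HashMap<>();
        for (int partId : parts) {
            List<String> partNodes = assignment.get(partId);
            if (partNodes.isEmpty())
                continue; // partition currently has no owners
            String primary = partNodes.get(0);
            mapping.computeIfAbsent(primary, n -> new ArrayList<>()).add(partId);
        }
        return mapping;
    }

    public static void main(String[] args) {
        List<List<String>> assignment = Arrays.asList(
            Arrays.asList("A", "B"),  // partition 0: primary A
            Arrays.asList("B", "A"),  // partition 1: primary B
            Arrays.asList("A", "C")); // partition 2: primary A
        System.out.println(groupByPrimary(assignment, new int[] { 0, 1, 2 })); // e.g. {A=[0, 2], B=[1]}
    }
}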

Example 10 with IntArray

Use of org.h2.util.IntArray in project ignite by apache.

The class GridReduceQueryExecutor, method narrowForQuery:

/**
 * Narrows the given node-to-partitions mapping to the explicitly requested partitions.
 *
 * @param partsMap Node to partitions mapping.
 * @param parts Requested partitions, or {@code null} for all.
 * @return Narrowed copy of the mapping, or {@code null} if no node owns any of the requested partitions.
 */
private Map<ClusterNode, IntArray> narrowForQuery(Map<ClusterNode, IntArray> partsMap, int[] parts) {
    if (parts == null)
        return partsMap;
    Map<ClusterNode, IntArray> cp = U.newHashMap(partsMap.size());
    for (Map.Entry<ClusterNode, IntArray> entry : partsMap.entrySet()) {
        IntArray filtered = new IntArray(parts.length);
        IntArray orig = entry.getValue();
        for (int i = 0; i < orig.size(); i++) {
            int p = orig.get(i);
            if (Arrays.binarySearch(parts, p) >= 0)
                filtered.add(p);
        }
        if (filtered.size() > 0)
            cp.put(entry.getKey(), filtered);
    }
    return cp.isEmpty() ? null : cp;
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) IntArray(org.h2.util.IntArray) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) LinkedHashMap(java.util.LinkedHashMap)
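
A minimal sketch of the narrowing step in isolation, assuming the H2 jar is on the classpath. Note that Arrays.binarySearch is only correct when the requested partition array is sorted, which the caller is expected to guarantee.

import java.util.Arrays;
import org.h2.util.IntArray;

public class NarrowPartitionsSketch {
    static IntArray narrow(IntArray orig, int[] sortedParts) {
        IntArray filtered = new IntArray(sortedParts.length);
        for (int i = 0; i < orig.size(); i++) {
            int p = orig.get(i);
            if (Arrays.binarySearch(sortedParts, p) >= 0)
                filtered.add(p); // keep only partitions that were explicitly requested
        }
        return filtered;
    }

    public static void main(String[] args) {
        IntArray orig = new IntArray(new int[] { 1, 3, 5, 7 });
        IntArray narrowed = narrow(orig, new int[] { 3, 4, 5 });
        for (int i = 0; i < narrowed.size(); i++)
            System.out.println(narrowed.get(i)); // prints 3, 5
    }
}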

Aggregations

IntArray (org.h2.util.IntArray): 17
ClusterNode (org.apache.ignite.cluster.ClusterNode): 11
HashMap (java.util.HashMap): 7
CacheException (javax.cache.CacheException): 6
LinkedHashMap (java.util.LinkedHashMap): 4
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 4
List (java.util.List): 3
ResultSet (java.sql.ResultSet): 2
ArrayList (java.util.ArrayList): 2
Collections.singletonList (java.util.Collections.singletonList): 2
HashSet (java.util.HashSet): 2
Map (java.util.Map): 2
Set (java.util.Set): 2
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 2
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 2
GridQueryCancelRequest (org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryCancelRequest): 2
GridIntList (org.apache.ignite.internal.util.GridIntList): 2
BitSet (java.util.BitSet): 1
Random (java.util.Random): 1
UUID (java.util.UUID): 1