
Example 11 with IntArray

Use of org.h2.util.IntArray in project ignite by apache.

The class GridReduceQueryExecutor, method query.

/**
 * @param schemaName Schema name.
 * @param qry Query.
 * @param keepBinary Keep binary.
 * @param enforceJoinOrder Enforce join order of tables.
 * @param timeoutMillis Timeout in milliseconds.
 * @param cancel Query cancel.
 * @param params Query parameters.
 * @param parts Partitions.
 * @param lazy Lazy execution flag.
 * @return Rows iterator.
 */
public Iterator<List<?>> query(String schemaName, final GridCacheTwoStepQuery qry, boolean keepBinary, boolean enforceJoinOrder, int timeoutMillis, GridQueryCancel cancel, Object[] params, final int[] parts, boolean lazy) {
    if (F.isEmpty(params))
        params = EMPTY_PARAMS;
    final boolean isReplicatedOnly = qry.isReplicatedOnly();
    // Fail if all caches are replicated and explicit partitions are set.
    for (int attempt = 0; ; attempt++) {
        if (attempt != 0) {
            try {
                // Wait for exchange.
                Thread.sleep(attempt * 10);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new CacheException("Query was interrupted.", e);
            }
        }
        final long qryReqId = qryIdGen.incrementAndGet();
        final ReduceQueryRun r = new ReduceQueryRun(qryReqId, qry.originalSql(), schemaName, h2.connectionForSchema(schemaName), qry.mapQueries().size(), qry.pageSize(), U.currentTimeMillis(), cancel);
        AffinityTopologyVersion topVer = h2.readyTopologyVersion();
        // Check if topology is changed while retrying on locked topology.
        if (h2.serverTopologyChanged(topVer) && ctx.cache().context().lockedTopologyVersion(null) != null) {
            throw new CacheException(new TransactionException("Server topology is changed during query " + "execution inside a transaction. It's recommended to rollback and retry transaction."));
        }
        List<Integer> cacheIds = qry.cacheIds();
        Collection<ClusterNode> nodes;
        // Explicit partition mapping for unstable topology.
        Map<ClusterNode, IntArray> partsMap = null;
        // Explicit partitions mapping for query.
        Map<ClusterNode, IntArray> qryMap = null;
        // Partitions are not supported for queries over all replicated caches.
        if (parts != null) {
            boolean replicatedOnly = true;
            for (Integer cacheId : cacheIds) {
                if (!cacheContext(cacheId).isReplicated()) {
                    replicatedOnly = false;
                    break;
                }
            }
            if (replicatedOnly)
                throw new CacheException("Partitions are not supported for replicated caches");
        }
        if (qry.isLocal())
            nodes = singletonList(ctx.discovery().localNode());
        else {
            NodesForPartitionsResult nodesParts = nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly);
            nodes = nodesParts.nodes();
            partsMap = nodesParts.partitionsMap();
            qryMap = nodesParts.queryPartitionsMap();
            if (nodes == null)
                // Retry.
                continue;
            assert !nodes.isEmpty();
            if (isReplicatedOnly || qry.explain()) {
                ClusterNode locNode = ctx.discovery().localNode();
                // Always prefer local node if possible.
                if (nodes.contains(locNode))
                    nodes = singletonList(locNode);
                else {
                    // Select random data node to run query on a replicated data or
                    // get EXPLAIN PLAN from a single node.
                    nodes = singletonList(F.rand(nodes));
                }
            }
        }
        int tblIdx = 0;
        final boolean skipMergeTbl = !qry.explain() && qry.skipMergeTable();
        final int segmentsPerIndex = qry.explain() || isReplicatedOnly ? 1 : findFirstPartitioned(cacheIds).config().getQueryParallelism();
        int replicatedQrysCnt = 0;
        final Collection<ClusterNode> finalNodes = nodes;
        for (GridCacheSqlQuery mapQry : qry.mapQueries()) {
            GridMergeIndex idx;
            if (!skipMergeTbl) {
                GridMergeTable tbl;
                try {
                    tbl = createMergeTable(r.connection(), mapQry, qry.explain());
                } catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }
                idx = tbl.getMergeIndex();
                fakeTable(r.connection(), tblIdx++).innerTable(tbl);
            } else
                idx = GridMergeIndexUnsorted.createDummy(ctx);
            // If the query has only replicated tables, we have to run it on a single node only.
            if (!mapQry.isPartitioned()) {
                ClusterNode node = F.rand(nodes);
                mapQry.node(node.id());
                replicatedQrysCnt++;
                // Replicated tables can have only 1 segment.
                idx.setSources(singletonList(node), 1);
            } else
                idx.setSources(nodes, segmentsPerIndex);
            idx.setPageSize(r.pageSize());
            r.indexes().add(idx);
        }
        r.latch(new CountDownLatch(isReplicatedOnly ? 1 : (r.indexes().size() - replicatedQrysCnt) * nodes.size() * segmentsPerIndex + replicatedQrysCnt));
        runs.put(qryReqId, r);
        boolean release = true;
        try {
            cancel.checkCancelled();
            if (ctx.clientDisconnected()) {
                throw new CacheException("Query was cancelled, client node disconnected.", new IgniteClientDisconnectedException(ctx.cluster().clientReconnectFuture(), "Client node disconnected."));
            }
            List<GridCacheSqlQuery> mapQrys = qry.mapQueries();
            if (qry.explain()) {
                mapQrys = new ArrayList<>(qry.mapQueries().size());
                for (GridCacheSqlQuery mapQry : qry.mapQueries()) mapQrys.add(new GridCacheSqlQuery("EXPLAIN " + mapQry.query()).parameterIndexes(mapQry.parameterIndexes()));
            }
            final boolean distributedJoins = qry.distributedJoins();
            cancel.set(new Runnable() {

                @Override
                public void run() {
                    send(finalNodes, new GridQueryCancelRequest(qryReqId), null, false);
                }
            });
            boolean retry = false;
            // Always enforce join order on map side to have consistent behavior.
            int flags = GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER;
            if (distributedJoins)
                flags |= GridH2QueryRequest.FLAG_DISTRIBUTED_JOINS;
            if (qry.isLocal())
                flags |= GridH2QueryRequest.FLAG_IS_LOCAL;
            if (qry.explain())
                flags |= GridH2QueryRequest.FLAG_EXPLAIN;
            if (isReplicatedOnly)
                flags |= GridH2QueryRequest.FLAG_REPLICATED;
            if (lazy && mapQrys.size() == 1)
                flags |= GridH2QueryRequest.FLAG_LAZY;
            GridH2QueryRequest req = new GridH2QueryRequest().requestId(qryReqId).topologyVersion(topVer).pageSize(r.pageSize()).caches(qry.cacheIds()).tables(distributedJoins ? qry.tables() : null).partitions(convert(partsMap)).queries(mapQrys).parameters(params).flags(flags).timeout(timeoutMillis).schemaName(schemaName);
            if (send(nodes, req, parts == null ? null : new ExplicitPartitionsSpecializer(qryMap), false)) {
                awaitAllReplies(r, nodes, cancel);
                Object state = r.state();
                if (state != null) {
                    if (state instanceof CacheException) {
                        CacheException err = (CacheException) state;
                        if (err.getCause() instanceof IgniteClientDisconnectedException)
                            throw err;
                        if (wasCancelled(err))
                            // Throw correct exception.
                            throw new QueryCancelledException();
                        throw new CacheException("Failed to run map query remotely." + err.getMessage(), err);
                    }
                    if (state instanceof AffinityTopologyVersion) {
                        retry = true;
                        // If remote node asks us to retry then we have outdated full partition map.
                        h2.awaitForReadyTopologyVersion((AffinityTopologyVersion) state);
                    }
                }
            } else
                // Send failed.
                retry = true;
            Iterator<List<?>> resIter = null;
            if (!retry) {
                if (skipMergeTbl) {
                    resIter = new GridMergeIndexIterator(this, finalNodes, r, qryReqId, qry.distributedJoins());
                    release = false;
                } else {
                    cancel.checkCancelled();
                    UUID locNodeId = ctx.localNodeId();
                    H2Utils.setupConnection(r.connection(), false, enforceJoinOrder);
                    GridH2QueryContext.set(new GridH2QueryContext(locNodeId, locNodeId, qryReqId, REDUCE).pageSize(r.pageSize()).distributedJoinMode(OFF));
                    try {
                        if (qry.explain())
                            return explainPlan(r.connection(), qry, params);
                        GridCacheSqlQuery rdc = qry.reduceQuery();
                        // The statement will cache some extra thread local objects.
                        ResultSet res = h2.executeSqlQueryWithTimer(r.connection(), rdc.query(),
                            F.asList(rdc.parameters(params)), false, timeoutMillis, cancel);
                        resIter = new H2FieldsIterator(res);
                    } finally {
                        GridH2QueryContext.clearThreadLocal();
                    }
                }
            }
            if (retry) {
                if (Thread.currentThread().isInterrupted())
                    throw new IgniteInterruptedCheckedException("Query was interrupted.");
                continue;
            }
            return new GridQueryCacheObjectsIterator(resIter, h2.objectContext(), keepBinary);
        } catch (IgniteCheckedException | RuntimeException e) {
            release = true;
            U.closeQuiet(r.connection());
            if (e instanceof CacheException) {
                if (wasCancelled((CacheException) e))
                    throw new CacheException("Failed to run reduce query locally.", new QueryCancelledException());
                throw (CacheException) e;
            }
            Throwable cause = e;
            if (e instanceof IgniteCheckedException) {
                Throwable disconnectedErr = ((IgniteCheckedException) e).getCause(IgniteClientDisconnectedException.class);
                if (disconnectedErr != null)
                    cause = disconnectedErr;
            }
            throw new CacheException("Failed to run reduce query locally.", cause);
        } finally {
            if (release) {
                releaseRemoteResources(finalNodes, r, qryReqId, qry.distributedJoins());
                if (!skipMergeTbl) {
                    // Drop all merge tables.
                    for (int i = 0, mapQrys = qry.mapQueries().size(); i < mapQrys; i++)
                        fakeTable(null, i).innerTable(null);
                }
            }
        }
    }
}
Also used : GridQueryCancelRequest(org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryCancelRequest) CacheException(javax.cache.CacheException) H2FieldsIterator(org.apache.ignite.internal.processors.query.h2.H2FieldsIterator) GridQueryCacheObjectsIterator(org.apache.ignite.internal.processors.query.GridQueryCacheObjectsIterator) IgniteInterruptedCheckedException(org.apache.ignite.internal.IgniteInterruptedCheckedException) TransactionException(org.apache.ignite.transactions.TransactionException) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IntArray(org.h2.util.IntArray) IgniteException(org.apache.ignite.IgniteException) ResultSet(java.sql.ResultSet) Collections.singletonList(java.util.Collections.singletonList) GridIntList(org.apache.ignite.internal.util.GridIntList) List(java.util.List) ArrayList(java.util.ArrayList) GridCacheSqlQuery(org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery) UUID(java.util.UUID) GridH2QueryContext(org.apache.ignite.internal.processors.query.h2.opt.GridH2QueryContext) ClusterNode(org.apache.ignite.cluster.ClusterNode) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) IgniteClientDisconnectedException(org.apache.ignite.IgniteClientDisconnectedException) GridH2QueryRequest(org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2QueryRequest) CountDownLatch(java.util.concurrent.CountDownLatch) QueryCancelledException(org.apache.ignite.cache.query.QueryCancelledException)
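
The request built above carries the explicit partition map through convert(partsMap), whose implementation is not part of this snippet. As a rough, hypothetical sketch of what that flattening step has to do, the helper below turns a Map of IntArray values into plain int[] arrays per key, using only the IntArray methods that appear in these examples (size() and toArray(int[])); it is not the actual GridReduceQueryExecutor code.

import java.util.HashMap;
import java.util.Map;

import org.h2.util.IntArray;

public final class IntArrayMaps {

    /**
     * Hypothetical sketch: flatten per-key IntArray partition lists into int[] arrays.
     * It only mirrors the general shape of the convert(partsMap) call used above; the
     * real Ignite implementation is not shown in this example.
     */
    public static <K> Map<K, int[]> toPrimitive(Map<K, IntArray> src) {
        if (src == null)
            return null;

        Map<K, int[]> res = new HashMap<>(src.size());

        for (Map.Entry<K, IntArray> e : src.entrySet()) {
            int[] arr = new int[e.getValue().size()];

            // IntArray.toArray(int[]) copies the collected ints, as in Example 12 below.
            e.getValue().toArray(arr);

            res.put(e.getKey(), arr);
        }

        return res;
    }
}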

Example 12 with IntArray

Use of org.h2.util.IntArray in project h2database by h2database.

The class PageOutputStream, method freeReserved.

/**
 * Free up all reserved pages.
 */
void freeReserved() {
    if (reservedPages.size() > 0) {
        int[] array = new int[reservedPages.size()];
        reservedPages.toArray(array);
        reservedPages = new IntArray();
        reserved = 0;
        for (int p : array) {
            store.free(p, false);
        }
    }
}
Also used : IntArray(org.h2.util.IntArray)
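
For reference, a minimal standalone sketch of the same IntArray round trip used in freeReserved() above: collect ints with add(), copy them out with toArray(int[]), then start over with a fresh IntArray. Only methods that already appear in these examples are used.

import org.h2.util.IntArray;

public class IntArrayRoundTrip {

    public static void main(String[] args) {
        IntArray pages = new IntArray();

        // Collect some page ids.
        for (int p = 10; p < 15; p++)
            pages.add(p);

        // Copy them out before resetting, as freeReserved() does.
        int[] array = new int[pages.size()];
        pages.toArray(array);

        // Start over with an empty list.
        pages = new IntArray();

        for (int p : array)
            System.out.println("freed page " + p);
    }
}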

Example 13 with IntArray

Use of org.h2.util.IntArray in project h2database by h2database.

The class ViewIndex, method getQuery.

private Query getQuery(Session session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder) {
    Query q = prepareSubQuery(querySQL, session, masks, filters, filter, sortOrder);
    if (masks == null) {
        return q;
    }
    if (!q.allowGlobalConditions()) {
        return q;
    }
    int firstIndexParam = view.getParameterOffset(originalParameters);
    // the column index of each parameter
    // (for example: paramColumnIndex {0, 0} means
    // param[0] is column 0, and param[1] is also column 0)
    IntArray paramColumnIndex = new IntArray();
    int indexColumnCount = 0;
    for (int i = 0; i < masks.length; i++) {
        int mask = masks[i];
        if (mask == 0) {
            continue;
        }
        indexColumnCount++;
        // the number of parameters depends on the mask;
        // for range queries it is 2: >= x AND <= y
        // but bitMask could also be 7 (=, and <=, and >=)
        int bitCount = Integer.bitCount(mask);
        for (int j = 0; j < bitCount; j++) {
            paramColumnIndex.add(i);
        }
    }
    int len = paramColumnIndex.size();
    ArrayList<Column> columnList = New.arrayList();
    for (int i = 0; i < len; ) {
        int idx = paramColumnIndex.get(i);
        columnList.add(table.getColumn(idx));
        int mask = masks[idx];
        if ((mask & IndexCondition.EQUALITY) != 0) {
            Parameter param = new Parameter(firstIndexParam + i);
            q.addGlobalCondition(param, idx, Comparison.EQUAL_NULL_SAFE);
            i++;
        }
        if ((mask & IndexCondition.START) != 0) {
            Parameter param = new Parameter(firstIndexParam + i);
            q.addGlobalCondition(param, idx, Comparison.BIGGER_EQUAL);
            i++;
        }
        if ((mask & IndexCondition.END) != 0) {
            Parameter param = new Parameter(firstIndexParam + i);
            q.addGlobalCondition(param, idx, Comparison.SMALLER_EQUAL);
            i++;
        }
        if ((mask & IndexCondition.SPATIAL_INTERSECTS) != 0) {
            Parameter param = new Parameter(firstIndexParam + i);
            q.addGlobalCondition(param, idx, Comparison.SPATIAL_INTERSECTS);
            i++;
        }
    }
    columns = columnList.toArray(new Column[0]);
    // reconstruct the index columns from the masks
    this.indexColumns = new IndexColumn[indexColumnCount];
    this.columnIds = new int[indexColumnCount];
    for (int type = 0, indexColumnId = 0; type < 2; type++) {
        for (int i = 0; i < masks.length; i++) {
            int mask = masks[i];
            if (mask == 0) {
                continue;
            }
            if (type == 0) {
                if ((mask & IndexCondition.EQUALITY) == 0) {
                    // the first columns need to be equality conditions
                    continue;
                }
            } else {
                if ((mask & IndexCondition.EQUALITY) != 0) {
                    // after that only range conditions
                    continue;
                }
            }
            IndexColumn c = new IndexColumn();
            c.column = table.getColumn(i);
            indexColumns[indexColumnId] = c;
            columnIds[indexColumnId] = c.column.getColumnId();
            indexColumnId++;
        }
    }
    String sql = q.getPlanSQL();
    q = prepareSubQuery(sql, session, masks, filters, filter, sortOrder);
    return q;
}
Also used : Query(org.h2.command.dml.Query) IntArray(org.h2.util.IntArray) Column(org.h2.table.Column) IndexColumn(org.h2.table.IndexColumn) Parameter(org.h2.expression.Parameter)
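
To make the mask bookkeeping in getQuery() concrete, here is a self-contained sketch (outside ViewIndex, with made-up mask values) of how Integer.bitCount(mask) decides how many parameter slots point at the same column index.

import org.h2.util.IntArray;

public class MaskToParams {

    public static void main(String[] args) {
        // Hypothetical masks: column 0 has no condition, column 1 has two condition
        // bits set (a range), column 2 has three (the "bitMask could also be 7" case).
        int[] masks = { 0, 6, 7 };

        IntArray paramColumnIndex = new IntArray();

        for (int i = 0; i < masks.length; i++) {
            int mask = masks[i];
            if (mask == 0)
                continue;

            // One parameter slot per condition bit set for this column.
            int bitCount = Integer.bitCount(mask);
            for (int j = 0; j < bitCount; j++)
                paramColumnIndex.add(i);
        }

        // Prints "1 1 2 2 2": parameters 0..1 target column 1, parameters 2..4 target column 2.
        for (int i = 0; i < paramColumnIndex.size(); i++)
            System.out.print(paramColumnIndex.get(i) + " ");
        System.out.println();
    }
}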

Example 14 with IntArray

Use of org.h2.util.IntArray in project ignite by apache.

The class ReducePartitionMapper, method nodesForPartitions.

/**
 * Evaluates nodes and nodes to partitions map given a list of cache ids, topology version and partitions.
 *
 * @param cacheIds Cache ids.
 * @param topVer Topology version.
 * @param parts Partitions array.
 * @param isReplicatedOnly Allow only replicated caches.
 * @return Result.
 */
public ReducePartitionMapResult nodesForPartitions(List<Integer> cacheIds, AffinityTopologyVersion topVer, int[] parts, boolean isReplicatedOnly) {
    Collection<ClusterNode> nodes = null;
    Map<ClusterNode, IntArray> partsMap = null;
    Map<ClusterNode, IntArray> qryMap = null;
    for (int cacheId : cacheIds) {
        GridCacheContext<?, ?> cctx = cacheContext(cacheId);
        Collection<Integer> lostParts = cctx.topology().lostPartitions();
        if (!lostParts.isEmpty()) {
            int lostPart = parts == null ? lostParts.iterator().next() : IntStream.of(parts).filter(lostParts::contains).findFirst().orElse(-1);
            if (lostPart >= 0) {
                throw new CacheException(new CacheInvalidStateException("Failed to execute query because cache " + "partition has been lost [cacheName=" + cctx.name() + ", part=" + lostPart + ']'));
            }
        }
    }
    if (isPreloadingActive(cacheIds)) {
        if (isReplicatedOnly)
            nodes = replicatedUnstableDataNodes(cacheIds);
        else {
            partsMap = partitionedUnstableDataNodes(cacheIds);
            if (partsMap != null) {
                qryMap = narrowForQuery(partsMap, parts);
                nodes = qryMap == null ? null : qryMap.keySet();
            }
        }
    } else {
        if (parts == null)
            nodes = stableDataNodes(cacheIds, topVer, isReplicatedOnly);
        else {
            qryMap = stableDataNodesForPartitions(topVer, cacheIds, parts);
            if (qryMap != null)
                nodes = qryMap.keySet();
        }
    }
    return new ReducePartitionMapResult(nodes, partsMap, qryMap);
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) IntArray(org.h2.util.IntArray) CacheException(javax.cache.CacheException) CacheInvalidStateException(org.apache.ignite.internal.processors.cache.CacheInvalidStateException)
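
narrowForQuery(partsMap, parts), called above, is not included in this snippet. The standalone helper below (hypothetical, with a generic key type instead of ClusterNode so it compiles without Ignite) shows one plausible shape of that narrowing step: keep only the explicitly requested partitions per key and drop keys that end up empty; the real method may behave differently, for example by returning null to force a retry.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import org.h2.util.IntArray;

public final class PartitionNarrowing {

    /** Hypothetical sketch of a narrowing step; not the actual narrowForQuery implementation. */
    public static <K> Map<K, IntArray> narrow(Map<K, IntArray> src, int[] requested) {
        if (requested == null)
            return src;

        int[] sorted = requested.clone();
        Arrays.sort(sorted);

        Map<K, IntArray> res = new HashMap<>();

        for (Map.Entry<K, IntArray> e : src.entrySet()) {
            IntArray narrowed = new IntArray();

            for (int i = 0; i < e.getValue().size(); i++) {
                int part = e.getValue().get(i);

                // Keep the partition only if it was explicitly requested.
                if (Arrays.binarySearch(sorted, part) >= 0)
                    narrowed.add(part);
            }

            if (narrowed.size() > 0)
                res.put(e.getKey(), narrowed);
        }

        return res;
    }
}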

Example 15 with IntArray

Use of org.h2.util.IntArray in project ignite by apache.

The class ReducePartitionMapper, method partitionedUnstableDataNodes.

/**
 * Calculates partition mapping for partitioned cache on unstable topology.
 *
 * @param cacheIds Cache IDs.
 * @return Partition mapping or {@code null} if we can't calculate it due to repartitioning and we need to retry.
 */
@SuppressWarnings("unchecked")
private Map<ClusterNode, IntArray> partitionedUnstableDataNodes(List<Integer> cacheIds) {
    // If the main cache is replicated, just replace it with the first partitioned.
    GridCacheContext<?, ?> cctx = findFirstPartitioned(cacheIds);
    final int partsCnt = cctx.affinity().partitions();
    if (cacheIds.size() > 1) {
        // Check correct number of partitions for partitioned caches.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);
            if (extraCctx.isReplicated() || extraCctx.isLocal())
                continue;
            int parts = extraCctx.affinity().partitions();
            if (parts != partsCnt)
                throw new CacheException("Number of partitions must be the same for correct collocation [cache1=" + cctx.name() + ", parts1=" + partsCnt + ", cache2=" + extraCctx.name() + ", parts2=" + parts + "]");
        }
    }
    Set<ClusterNode>[] partLocs = new Set[partsCnt];
    // Fill partition locations for main cache.
    for (int p = 0; p < partsCnt; p++) {
        List<ClusterNode> owners = cctx.topology().owners(p);
        if (F.isEmpty(owners)) {
            // Handle special case: no mapping is configured for a partition or a lost partition is found.
            if (F.isEmpty(cctx.affinity().assignment(NONE).get(p)) || cctx.topology().lostPartitions().contains(p)) {
                // Mark unmapped partition.
                partLocs[p] = UNMAPPED_PARTS;
                continue;
            } else if (!F.isEmpty(dataNodes(cctx.groupId(), NONE))) {
                if (log.isInfoEnabled()) {
                    logRetry("Failed to calculate nodes for SQL query (partition has no owners, but corresponding " + "cache group has data nodes) [cacheIds=" + cacheIds + ", cacheName=" + cctx.name() + ", cacheId=" + cctx.cacheId() + ", part=" + p + ", cacheGroupId=" + cctx.groupId() + ']');
                }
                // Retry.
                return null;
            }
            throw new CacheServerNotFoundException("Failed to find data nodes [cache=" + cctx.name() + ", part=" + p + "]");
        }
        partLocs[p] = new HashSet<>(owners);
    }
    if (cacheIds.size() > 1) {
        // We need this for logical collocation between different partitioned caches with the same affinity.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);
            // This is possible if we have replaced a replicated cache with a partitioned one earlier.
            if (cctx == extraCctx)
                continue;
            if (extraCctx.isReplicated() || extraCctx.isLocal())
                continue;
            for (int p = 0, parts = extraCctx.affinity().partitions(); p < parts; p++) {
                List<ClusterNode> owners = extraCctx.topology().owners(p);
                if (partLocs[p] == UNMAPPED_PARTS)
                    // Skip unmapped partitions.
                    continue;
                if (F.isEmpty(owners)) {
                    if (!F.isEmpty(dataNodes(extraCctx.groupId(), NONE))) {
                        if (log.isInfoEnabled()) {
                            logRetry("Failed to calculate nodes for SQL query (partition has no owners, but " + "corresponding cache group has data nodes) [ cacheIds=" + cacheIds + ", cacheName=" + extraCctx.name() + ", cacheId=" + extraCctx.cacheId() + ", part=" + p + ", cacheGroupId=" + extraCctx.groupId() + ']');
                        }
                        // Retry.
                        return null;
                    }
                    throw new CacheServerNotFoundException("Failed to find data nodes [cache=" + extraCctx.name() + ", part=" + p + "]");
                }
                if (partLocs[p] == null)
                    partLocs[p] = new HashSet<>(owners);
                else {
                    // Intersection of owners.
                    partLocs[p].retainAll(owners);
                    if (partLocs[p].isEmpty()) {
                        if (log.isInfoEnabled()) {
                            logRetry("Failed to calculate nodes for SQL query (caches have no common data nodes for " + "partition) [cacheIds=" + cacheIds + ", lastCacheName=" + extraCctx.name() + ", lastCacheId=" + extraCctx.cacheId() + ", part=" + p + ']');
                        }
                        // Intersection is empty -> retry.
                        return null;
                    }
                }
            }
        }
        // Filter nodes where not all the replicated caches loaded.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);
            if (!extraCctx.isReplicated())
                continue;
            Set<ClusterNode> dataNodes = replicatedUnstableDataNodes(extraCctx);
            if (F.isEmpty(dataNodes))
                // Retry.
                return null;
            int part = 0;
            for (Set<ClusterNode> partLoc : partLocs) {
                if (partLoc == UNMAPPED_PARTS)
                    // Skip unmapped partition.
                    continue;
                partLoc.retainAll(dataNodes);
                if (partLoc.isEmpty()) {
                    if (log.isInfoEnabled()) {
                        logRetry("Failed to calculate nodes for SQL query (caches have no common data nodes for " + "partition) [cacheIds=" + cacheIds + ", lastReplicatedCacheName=" + extraCctx.name() + ", lastReplicatedCacheId=" + extraCctx.cacheId() + ", part=" + part + ']');
                    }
                    // Retry.
                    return null;
                }
                part++;
            }
        }
    }
    // Collect the final partitions mapping.
    Map<ClusterNode, IntArray> res = new HashMap<>();
    // Here partitions in all IntArray's will be sorted in ascending order, this is important.
    for (int p = 0; p < partLocs.length; p++) {
        Set<ClusterNode> pl = partLocs[p];
        // Skip unmapped partitions.
        if (pl == UNMAPPED_PARTS)
            continue;
        assert !F.isEmpty(pl) : pl;
        ClusterNode n = pl.size() == 1 ? F.first(pl) : F.rand(pl);
        IntArray parts = res.get(n);
        if (parts == null)
            res.put(n, parts = new IntArray());
        parts.add(p);
    }
    return res;
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) CacheServerNotFoundException(org.apache.ignite.cache.CacheServerNotFoundException) HashSet(java.util.HashSet) Set(java.util.Set) CacheException(javax.cache.CacheException) HashMap(java.util.HashMap) IntArray(org.h2.util.IntArray)
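
The final loop of partitionedUnstableDataNodes() relies on iterating partitions in ascending order, so each node's IntArray comes out sorted without an explicit sort. A stripped-down sketch of that accumulation pattern, with String keys standing in for ClusterNode so it runs standalone:

import java.util.HashMap;
import java.util.Map;

import org.h2.util.IntArray;

public class PartitionAccumulation {

    public static void main(String[] args) {
        // Hypothetical owner per partition; the real code picks one owner from partLocs[p].
        String[] ownerByPart = { "nodeA", "nodeB", "nodeA", "nodeA", "nodeB" };

        Map<String, IntArray> res = new HashMap<>();

        // Because p increases monotonically, every IntArray ends up sorted ascending.
        for (int p = 0; p < ownerByPart.length; p++) {
            String n = ownerByPart[p];

            IntArray parts = res.get(n);

            if (parts == null)
                res.put(n, parts = new IntArray());

            parts.add(p);
        }

        // Expected: nodeA -> 0 2 3, nodeB -> 1 4.
        for (Map.Entry<String, IntArray> e : res.entrySet()) {
            StringBuilder sb = new StringBuilder(e.getKey() + " ->");

            for (int i = 0; i < e.getValue().size(); i++)
                sb.append(' ').append(e.getValue().get(i));

            System.out.println(sb);
        }
    }
}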

Aggregations

IntArray (org.h2.util.IntArray): 17
ClusterNode (org.apache.ignite.cluster.ClusterNode): 11
HashMap (java.util.HashMap): 7
CacheException (javax.cache.CacheException): 6
LinkedHashMap (java.util.LinkedHashMap): 4
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 4
List (java.util.List): 3
ResultSet (java.sql.ResultSet): 2
ArrayList (java.util.ArrayList): 2
Collections.singletonList (java.util.Collections.singletonList): 2
HashSet (java.util.HashSet): 2
Map (java.util.Map): 2
Set (java.util.Set): 2
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 2
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 2
GridQueryCancelRequest (org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryCancelRequest): 2
GridIntList (org.apache.ignite.internal.util.GridIntList): 2
BitSet (java.util.BitSet): 1
Random (java.util.Random): 1
UUID (java.util.UUID): 1