Use of org.h2.util.IntArray in project h2database by h2database.
From the class TestIntArray, method testRemoveRange.
private void testRemoveRange() {
    IntArray array = new IntArray(new int[] { 1, 2, 3, 4, 5 });

    // Removes indices [1, 3), i.e. the elements 2 and 3.
    array.removeRange(1, 3);

    assertEquals(3, array.size());
    assertEquals(1, array.get(0));
    assertEquals(4, array.get(1));
    assertEquals(5, array.get(2));
}
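As the test shows, removeRange(from, to) follows the usual half-open convention: the element at from is removed, the element at to is kept. A minimal standalone sketch, assuming only that the h2 jar (org.h2.util.IntArray) is on the classpath:

import org.h2.util.IntArray;

public class IntArrayRemoveRangeDemo {
    public static void main(String[] args) {
        IntArray array = new IntArray(new int[] { 10, 20, 30, 40, 50 });

        // Removes indices 1 (inclusive) through 3 (exclusive), i.e. 20 and 30.
        array.removeRange(1, 3);

        for (int i = 0; i < array.size(); i++)
            System.out.println(array.get(i)); // prints 10, 40, 50
    }
}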
Use of org.h2.util.IntArray in project ignite by apache.
From the class GridReduceQueryExecutor, method createNodeToSegmentsCountMapping.
/**
 * Creates a mapping of node to the number of index segments expected to be scanned on that node.
 *
 * @param qry Query to create the mapping for.
 * @param mapping Nodes to partitions mapping.
 * @return Mapping of node to segment count.
 */
private Map<ClusterNode, Integer> createNodeToSegmentsCountMapping(GridCacheTwoStepQuery qry,
    ReducePartitionMapResult mapping) {
    Map<ClusterNode, Integer> res = new HashMap<>();

    Collection<ClusterNode> nodes = mapping.nodes();

    // Explain and replicated-only queries scan a single segment per node.
    if (qry.explain() || qry.isReplicatedOnly()) {
        for (ClusterNode node : nodes) {
            Integer prev = res.put(node, 1);

            assert prev == null;
        }

        return res;
    }

    final int segments = mapper.findFirstPartitioned(qry.cacheIds()).config().getQueryParallelism();

    for (ClusterNode node : nodes) {
        Map<ClusterNode, IntArray> partsMapping = mapping.queryPartitionsMap();

        if (partsMapping != null) {
            BitSet bs = new BitSet(segments);

            IntArray parts = partsMapping.get(node);

            // Mark every index segment that holds at least one of the node's partitions.
            for (int i = 0; i < parts.size(); i++)
                bs.set(InlineIndexImpl.calculateSegment(segments, parts.get(i)));

            Integer prev = res.put(node, bs.cardinality());

            assert prev == null;
        }
        else
            res.put(node, segments);
    }

    return res;
}
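The per-node count is simply the number of distinct index segments hit by that node's partitions. A standalone sketch of the idea, assuming the segment of a partition is derived as partition modulo segment count (which is what InlineIndexImpl.calculateSegment amounts to; treat that mapping as an assumption, not a guarantee):

import java.util.BitSet;

public class SegmentCountSketch {
    // Counts how many distinct segments the given partitions map to.
    static int distinctSegments(int[] partitions, int segments) {
        BitSet bs = new BitSet(segments);

        for (int part : partitions)
            bs.set(segments == 1 ? 0 : part % segments); // assumed segment function

        return bs.cardinality();
    }

    public static void main(String[] args) {
        // With 4-way parallelism, partitions 0, 4 and 8 all land in segment 0.
        System.out.println(distinctSegments(new int[] { 0, 4, 8 }, 4)); // 1
        System.out.println(distinctSegments(new int[] { 0, 1, 2 }, 4)); // 3
    }
}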
Use of org.h2.util.IntArray in project ignite by apache.
From the class GridReduceQueryExecutor, method update.
/**
* @param schemaName Schema name.
* @param cacheIds Cache ids.
* @param selectQry Select query.
* @param params SQL parameters.
* @param enforceJoinOrder Enforce join order of tables.
* @param pageSize Page size.
* @param timeoutMillis Timeout.
* @param parts Partitions.
* @param isReplicatedOnly Whether query uses only replicated caches.
* @param cancel Cancel state.
* @return Update result, or {@code null} when some map node doesn't support distributed DML.
*/
@SuppressWarnings("IfMayBeConditional")
public UpdateResult update(String schemaName, List<Integer> cacheIds, String selectQry, Object[] params, boolean enforceJoinOrder, int pageSize, int timeoutMillis, final int[] parts, boolean isReplicatedOnly, GridQueryCancel cancel) {
AffinityTopologyVersion topVer = h2.readyTopologyVersion();
ReducePartitionMapResult nodesParts = mapper.nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly);
Collection<ClusterNode> nodes = nodesParts.nodes();
if (F.isEmpty(nodes))
throw new CacheException("Failed to determine nodes participating in the update. " + "Explanation (Retry update once topology recovers).");
if (isReplicatedOnly) {
ClusterNode locNode = ctx.discovery().localNode();
if (nodes.contains(locNode))
nodes = singletonList(locNode);
else
nodes = singletonList(F.rand(nodes));
}
for (ClusterNode n : nodes) {
if (!n.version().greaterThanEqual(2, 3, 0)) {
log.warning("Server-side DML optimization is skipped because map node does not support it. " + "Falling back to normal DML. [node=" + n.id() + ", v=" + n.version() + "].");
return null;
}
}
final long reqId = qryReqIdGen.incrementAndGet();
h2.runningQueryManager().trackRequestId(reqId);
final DmlDistributedUpdateRun r = new DmlDistributedUpdateRun(nodes.size());
int flags = enforceJoinOrder ? GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER : 0;
if (isReplicatedOnly)
flags |= GridH2QueryRequest.FLAG_REPLICATED;
GridH2DmlRequest req = new GridH2DmlRequest().requestId(reqId).topologyVersion(topVer).caches(cacheIds).schemaName(schemaName).query(selectQry).pageSize(pageSize).parameters(params).timeout(timeoutMillis).explicitTimeout(true).flags(flags);
updRuns.put(reqId, r);
boolean release = false;
try {
Map<ClusterNode, IntArray> partsMap = (nodesParts.queryPartitionsMap() != null) ? nodesParts.queryPartitionsMap() : nodesParts.partitionsMap();
ReducePartitionsSpecializer partsSpec = (parts == null) ? null : new ReducePartitionsSpecializer(partsMap);
final Collection<ClusterNode> finalNodes = nodes;
cancel.add(() -> {
r.future().onCancelled();
send(finalNodes, new GridQueryCancelRequest(reqId), null, true);
});
// send() logs the debug message
if (send(nodes, req, partsSpec, false))
return r.future().get();
throw new CacheException("Failed to send update request to participating nodes.");
} catch (IgniteCheckedException | RuntimeException e) {
release = true;
U.error(log, "Error during update [localNodeId=" + ctx.localNodeId() + "]", e);
throw new CacheException("Failed to run SQL update query. " + e.getMessage(), e);
} finally {
if (release)
send(nodes, new GridQueryCancelRequest(reqId), null, false);
if (!updRuns.remove(reqId, r))
U.warn(log, "Update run was already removed: " + reqId);
}
}
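For orientation, a hypothetical call site; the executor reference, schema, SQL text and argument values below are illustrative only, not taken from the Ignite sources:

// 'rdcQryExec' is a GridReduceQueryExecutor obtained elsewhere (hypothetical).
UpdateResult res = rdcQryExec.update(
    "PUBLIC",                 // schemaName
    cacheIds,                 // cache ids involved in the statement
    "UPDATE Person SET age = age + 1 WHERE age < ?",
    new Object[] { 30 },      // params
    false,                    // enforceJoinOrder
    1024,                     // pageSize
    0,                        // timeoutMillis
    null,                     // parts: null means all partitions
    false,                    // isReplicatedOnly
    new GridQueryCancel());   // cancel state

// A null result means some map node predates 2.3.0; fall back to normal DML.
if (res == null) {
    // ...
}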
Use of org.h2.util.IntArray in project ignite by apache.
From the class GridReduceQueryExecutor, method stableDataNodesMap.
/**
 * @param topVer Topology version.
 * @param cctx Cache context.
 * @param parts Partitions.
 * @return Mapping of primary node to the partitions it owns; a {@code null} value means all partitions.
 */
private Map<ClusterNode, IntArray> stableDataNodesMap(AffinityTopologyVersion topVer,
    final GridCacheContext<?, ?> cctx, @Nullable final int[] parts) {
    Map<ClusterNode, IntArray> mapping = new HashMap<>();

    // Explicit partitions mapping is not applicable to replicated cache.
    if (cctx.isReplicated()) {
        for (ClusterNode clusterNode : cctx.affinity().assignment(topVer).nodes())
            mapping.put(clusterNode, null);

        return mapping;
    }

    List<List<ClusterNode>> assignment = cctx.affinity().assignment(topVer).assignment();

    boolean needPartsFilter = parts != null;

    GridIntIterator iter = needPartsFilter ? new GridIntList(parts).iterator() :
        U.forRange(0, cctx.affinity().partitions());

    while (iter.hasNext()) {
        int partId = iter.next();

        List<ClusterNode> partNodes = assignment.get(partId);

        if (!partNodes.isEmpty()) {
            ClusterNode prim = partNodes.get(0);

            // Without an explicit partition filter, a null value means "all partitions".
            if (!needPartsFilter) {
                mapping.put(prim, null);

                continue;
            }

            IntArray partIds = mapping.get(prim);

            if (partIds == null) {
                partIds = new IntArray();

                mapping.put(prim, partIds);
            }

            partIds.add(partId);
        }
    }

    return mapping;
}
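The partitioned-cache branch is a plain group-by: each partition id is bucketed under its primary owner (index 0 of its assignment list), with the per-node IntArray created lazily on first use. A standalone sketch of that pattern, with String standing in for ClusterNode:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.h2.util.IntArray;

public class GroupByPrimarySketch {
    static Map<String, IntArray> groupByPrimary(List<List<String>> assignment) {
        Map<String, IntArray> mapping = new HashMap<>();

        for (int partId = 0; partId < assignment.size(); partId++) {
            List<String> partNodes = assignment.get(partId);

            if (partNodes.isEmpty())
                continue; // partition currently has no owners

            // Bucket the partition under its primary node, creating the array lazily.
            mapping.computeIfAbsent(partNodes.get(0), n -> new IntArray()).add(partId);
        }

        return mapping;
    }

    public static void main(String[] args) {
        Map<String, IntArray> m = groupByPrimary(List.of(
            List.of("A", "B"),    // partition 0: primary A
            List.of("B", "A"),    // partition 1: primary B
            List.of("A", "C")));  // partition 2: primary A

        System.out.println(m.get("A").size()); // 2 (partitions 0 and 2)
    }
}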
Use of org.h2.util.IntArray in project ignite by apache.
From the class GridReduceQueryExecutor, method narrowForQuery.
/**
 * Narrows the node to partitions mapping to the explicitly requested partitions.
 *
 * @param partsMap Node to partitions mapping.
 * @param parts Explicitly requested partitions (must be sorted: the filter relies on {@code Arrays.binarySearch}), or {@code null}.
 * @return Narrowed mapping, or {@code null} if no node owns any of the requested partitions.
 */
private Map<ClusterNode, IntArray> narrowForQuery(Map<ClusterNode, IntArray> partsMap, int[] parts) {
    if (parts == null)
        return partsMap;

    Map<ClusterNode, IntArray> cp = U.newHashMap(partsMap.size());

    for (Map.Entry<ClusterNode, IntArray> entry : partsMap.entrySet()) {
        IntArray filtered = new IntArray(parts.length);

        IntArray orig = entry.getValue();

        for (int i = 0; i < orig.size(); i++) {
            int p = orig.get(i);

            if (Arrays.binarySearch(parts, p) >= 0)
                filtered.add(p);
        }

        if (filtered.size() > 0)
            cp.put(entry.getKey(), filtered);
    }

    return cp.isEmpty() ? null : cp;
}
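Note the implicit precondition: Arrays.binarySearch only gives correct answers on a sorted array, so parts must arrive sorted ascending. A minimal standalone sketch of the narrowing step that sorts defensively (the original leaves this to the caller):

import java.util.Arrays;

import org.h2.util.IntArray;

public class NarrowSketch {
    static IntArray narrow(IntArray orig, int[] parts) {
        int[] sorted = parts.clone();
        Arrays.sort(sorted); // binarySearch below requires sorted input

        IntArray filtered = new IntArray(sorted.length);

        for (int i = 0; i < orig.size(); i++) {
            int p = orig.get(i);

            if (Arrays.binarySearch(sorted, p) >= 0)
                filtered.add(p);
        }

        return filtered;
    }

    public static void main(String[] args) {
        IntArray owned = new IntArray(new int[] { 1, 3, 5, 7 });

        IntArray narrowed = narrow(owned, new int[] { 7, 3 });

        for (int i = 0; i < narrowed.size(); i++)
            System.out.println(narrowed.get(i)); // prints 3, 7
    }
}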