Use of org.h2.command.dml.Set in project ignite by apache.
Class GridReduceQueryExecutor, method partitionedUnstableDataNodes.
/**
 * Calculates partition mapping for a partitioned cache on unstable topology.
 *
 * @param cacheIds Cache IDs.
 * @return Partition mapping, or {@code null} if it cannot be calculated because repartitioning is in progress and a retry is needed.
 */
@SuppressWarnings("unchecked")
private Map<ClusterNode, IntArray> partitionedUnstableDataNodes(List<Integer> cacheIds) {
    // If the main cache is replicated, just replace it with the first partitioned one.
    GridCacheContext<?, ?> cctx = findFirstPartitioned(cacheIds);

    final int partsCnt = cctx.affinity().partitions();

    if (cacheIds.size() > 1) {
        // Check that all partitioned caches have the same number of partitions.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);

            if (extraCctx.isReplicated() || extraCctx.isLocal())
                continue;

            int parts = extraCctx.affinity().partitions();

            if (parts != partsCnt)
                throw new CacheException("Number of partitions must be the same for correct collocation [cache1=" +
                    cctx.name() + ", parts1=" + partsCnt + ", cache2=" + extraCctx.name() + ", parts2=" + parts + "]");
        }
    }

    Set<ClusterNode>[] partLocs = new Set[partsCnt];

    // Fill partition locations for the main cache.
    for (int p = 0; p < partsCnt; p++) {
        List<ClusterNode> owners = cctx.topology().owners(p);

        if (F.isEmpty(owners)) {
            // Handle the special case when no mapping is configured for a partition.
            if (F.isEmpty(cctx.affinity().assignment(NONE).get(p))) {
                // Mark the partition as unmapped.
                partLocs[p] = UNMAPPED_PARTS;

                continue;
            }
            else if (!F.isEmpty(dataNodes(cctx.groupId(), NONE)))
                return null; // Retry.

            throw new CacheException("Failed to find data nodes [cache=" + cctx.name() + ", part=" + p + "]");
        }

        partLocs[p] = new HashSet<>(owners);
    }

    if (cacheIds.size() > 1) {
        // We need this for logical collocation between different partitioned caches with the same affinity.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);

            // This is possible if we have replaced a replicated cache with a partitioned one earlier.
            if (cctx == extraCctx)
                continue;

            if (extraCctx.isReplicated() || extraCctx.isLocal())
                continue;

            for (int p = 0, parts = extraCctx.affinity().partitions(); p < parts; p++) {
                List<ClusterNode> owners = extraCctx.topology().owners(p);

                if (partLocs[p] == UNMAPPED_PARTS)
                    continue; // Skip unmapped partitions.

                if (F.isEmpty(owners)) {
                    if (!F.isEmpty(dataNodes(extraCctx.groupId(), NONE)))
                        return null; // Retry.

                    throw new CacheException("Failed to find data nodes [cache=" + extraCctx.name() + ", part=" + p + "]");
                }

                if (partLocs[p] == null)
                    partLocs[p] = new HashSet<>(owners);
                else {
                    // Intersection of owners.
                    partLocs[p].retainAll(owners);

                    if (partLocs[p].isEmpty())
                        return null; // Intersection is empty -> retry.
                }
            }
        }

        // Filter out nodes where not all the replicated caches are loaded.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);

            if (!extraCctx.isReplicated())
                continue;

            Set<ClusterNode> dataNodes = replicatedUnstableDataNodes(extraCctx);

            if (F.isEmpty(dataNodes))
                return null; // Retry.

            for (Set<ClusterNode> partLoc : partLocs) {
                if (partLoc == UNMAPPED_PARTS)
                    continue; // Skip unmapped partitions.

                partLoc.retainAll(dataNodes);

                if (partLoc.isEmpty())
                    return null; // Retry.
            }
        }
    }

    // Collect the final partition mapping.
    Map<ClusterNode, IntArray> res = new HashMap<>();

    // Partitions in each IntArray will be sorted in ascending order; this is important.
    for (int p = 0; p < partLocs.length; p++) {
        Set<ClusterNode> pl = partLocs[p];

        // Skip unmapped partitions.
        if (pl == UNMAPPED_PARTS)
            continue;

        assert !F.isEmpty(pl) : pl;

        ClusterNode n = pl.size() == 1 ? F.first(pl) : F.rand(pl);

        IntArray parts = res.get(n);

        if (parts == null)
            res.put(n, parts = new IntArray());

        parts.add(p);
    }

    return res;
}
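The heart of the co-location logic above is an intersection of partition owners across caches: a partition may only be queried on nodes that own it in every participating cache, and an empty intersection forces a retry. Below is a minimal standalone sketch of that idea, using plain strings in place of ClusterNode (all names here are illustrative, not Ignite API):

import java.util.*;

public class OwnerIntersection {
    /** Returns the nodes owning a partition in every cache, or null if the intersection is empty (retry). */
    static Set<String> commonOwners(List<List<String>> ownersPerCache) {
        Set<String> common = null;

        for (List<String> owners : ownersPerCache) {
            if (common == null)
                common = new HashSet<>(owners);
            else
                common.retainAll(owners); // Intersection of owners, as in partitionedUnstableDataNodes.

            if (common.isEmpty())
                return null; // Empty intersection -> the caller must retry on a stable topology.
        }

        return common;
    }

    public static void main(String[] args) {
        List<List<String>> owners = Arrays.asList(
            Arrays.asList("A", "B", "C"),
            Arrays.asList("B", "C"),
            Arrays.asList("C", "D"));

        System.out.println(commonOwners(owners)); // Prints [C].
    }
}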
Use of org.h2.command.dml.Set in project ignite by apache.
Class GridMapQueryExecutor, method onQueryRequest0.
/**
 * @param node Node that authored the request.
 * @param reqId Request ID.
 * @param segmentId Index segment ID.
 * @param schemaName Schema name.
 * @param qrys Queries to execute.
 * @param cacheIds Caches which will be affected by these queries.
 * @param topVer Topology version.
 * @param partsMap Partitions map for unstable topology.
 * @param parts Explicit partitions for the current node.
 * @param pageSize Page size.
 * @param distributedJoinMode Query distributed join mode.
 * @param enforceJoinOrder Enforce join order flag.
 * @param replicated Replicated flag.
 * @param timeout Query timeout.
 * @param params Query parameters.
 * @param lazy Streaming flag.
 */
private void onQueryRequest0(final ClusterNode node, final long reqId, final int segmentId,
    final String schemaName, final Collection<GridCacheSqlQuery> qrys, final List<Integer> cacheIds,
    final AffinityTopologyVersion topVer, final Map<UUID, int[]> partsMap, final int[] parts,
    final int pageSize, final DistributedJoinMode distributedJoinMode, final boolean enforceJoinOrder,
    final boolean replicated, final int timeout, final Object[] params, boolean lazy) {
    if (lazy && MapQueryLazyWorker.currentWorker() == null) {
        // Lazy queries must be re-submitted to dedicated workers.
        MapQueryLazyWorkerKey key = new MapQueryLazyWorkerKey(node.id(), reqId, segmentId);
        MapQueryLazyWorker worker = new MapQueryLazyWorker(ctx.igniteInstanceName(), key, log, this);

        worker.submit(new Runnable() {
            @Override public void run() {
                onQueryRequest0(node, reqId, segmentId, schemaName, qrys, cacheIds, topVer, partsMap,
                    parts, pageSize, distributedJoinMode, enforceJoinOrder, replicated, timeout, params, true);
            }
        });

        if (lazyWorkerBusyLock.enterBusy()) {
            try {
                MapQueryLazyWorker oldWorker = lazyWorkers.put(key, worker);

                if (oldWorker != null)
                    oldWorker.stop();

                IgniteThread thread = new IgniteThread(worker);

                thread.start();
            }
            finally {
                lazyWorkerBusyLock.leaveBusy();
            }
        }
        else
            log.info("Ignored query request (node is stopping) [nodeId=" + node.id() + ", reqId=" + reqId + ']');

        return;
    }

    // Prepare to run queries.
    GridCacheContext<?, ?> mainCctx =
        !F.isEmpty(cacheIds) ? ctx.cache().context().cacheContext(cacheIds.get(0)) : null;

    MapNodeResults nodeRess = resultsForNode(node.id());

    MapQueryResults qr = null;

    List<GridReservable> reserved = new ArrayList<>();

    try {
        if (topVer != null) {
            // Reserve primary partitions for the topology version or the explicit partitions.
            if (!reservePartitions(cacheIds, topVer, parts, reserved)) {
                // Unregister the lazy worker because the retry may never reach this node again.
                if (lazy)
                    stopAndUnregisterCurrentLazyWorker();

                sendRetry(node, reqId, segmentId);

                return;
            }
        }

        qr = new MapQueryResults(h2, reqId, qrys.size(), mainCctx, MapQueryLazyWorker.currentWorker());

        if (nodeRess.put(reqId, segmentId, qr) != null)
            throw new IllegalStateException();

        // Prepare the query context.
        GridH2QueryContext qctx = new GridH2QueryContext(ctx.localNodeId(), node.id(), reqId, segmentId,
            replicated ? REPLICATED : MAP)
            .filter(h2.backupFilter(topVer, parts))
            .partitionsMap(partsMap)
            .distributedJoinMode(distributedJoinMode)
            .pageSize(pageSize)
            .topologyVersion(topVer)
            .reservations(reserved);

        Connection conn = h2.connectionForSchema(schemaName);

        H2Utils.setupConnection(conn, distributedJoinMode != OFF, enforceJoinOrder);

        GridH2QueryContext.set(qctx);

        // qctx is set, so reservations will be released through it.
        reserved = null;

        try {
            if (nodeRess.cancelled(reqId)) {
                GridH2QueryContext.clear(ctx.localNodeId(), node.id(), reqId, qctx.type());

                nodeRess.cancelRequest(reqId);

                throw new QueryCancelledException();
            }

            // Run queries.
            int qryIdx = 0;

            boolean evt = mainCctx != null && mainCctx.events().isRecordable(EVT_CACHE_QUERY_EXECUTED);

            for (GridCacheSqlQuery qry : qrys) {
                ResultSet rs = null;

                // If we are not the target node for this replicated query, just ignore it.
                if (qry.node() == null || (segmentId == 0 && qry.node().equals(ctx.localNodeId()))) {
                    rs = h2.executeSqlQueryWithTimer(conn, qry.query(), F.asList(qry.parameters(params)),
                        true, timeout, qr.queryCancel(qryIdx));

                    if (evt) {
                        ctx.event().record(new CacheQueryExecutedEvent<>(node, "SQL query executed.",
                            EVT_CACHE_QUERY_EXECUTED, CacheQueryType.SQL.name(), mainCctx.name(), null,
                            qry.query(), null, null, params, node.id(), null));
                    }

                    assert rs instanceof JdbcResultSet : rs.getClass();
                }

                qr.addResult(qryIdx, qry, node.id(), rs, params);

                if (qr.cancelled()) {
                    qr.result(qryIdx).close();

                    throw new QueryCancelledException();
                }

                // Send the first page.
                sendNextPage(nodeRess, node, qr, qryIdx, segmentId, pageSize);

                qryIdx++;
            }

            // All request results are already in memory in result sets, so it is ok to release partitions.
            if (!lazy)
                releaseReservations();
        }
        catch (Throwable e) {
            releaseReservations();

            throw e;
        }
    }
    catch (Throwable e) {
        if (qr != null) {
            nodeRess.remove(reqId, segmentId, qr);

            qr.cancel(false);
        }

        // Unregister the worker after possible cancellation.
        if (lazy)
            stopAndUnregisterCurrentLazyWorker();

        if (X.hasCause(e, GridH2RetryException.class))
            sendRetry(node, reqId, segmentId);
        else {
            U.error(log, "Failed to execute local query.", e);

            sendError(node, reqId, e);

            if (e instanceof Error)
                throw (Error)e;
        }
    }
    finally {
        if (reserved != null) {
            // Release reserved partitions.
            for (int i = 0; i < reserved.size(); i++)
                reserved.get(i).release();
        }
    }
}
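The lazy branch at the top of the method shows a re-submission pattern: when the current thread is not a dedicated worker, the very same call is wrapped in a Runnable, handed to a fresh worker, and the method returns immediately; the second pass then runs on the worker thread. A simplified, self-contained sketch of that control flow (class and method names are hypothetical, not the Ignite ones):

import java.util.concurrent.*;

public class ResubmitPattern {
    private static final ExecutorService WORKERS = Executors.newCachedThreadPool();

    private static final ThreadLocal<Boolean> IN_WORKER = ThreadLocal.withInitial(() -> false);

    static void handleRequest(String req, boolean lazy) {
        if (lazy && !IN_WORKER.get()) {
            // Not on a dedicated worker yet: re-submit the same request and return immediately.
            WORKERS.submit(() -> {
                IN_WORKER.set(true);

                handleRequest(req, true); // Second pass executes on the worker thread.
            });

            return;
        }

        System.out.println("Executing " + req + " on " + Thread.currentThread().getName());
    }

    public static void main(String[] args) throws Exception {
        handleRequest("SELECT 1", true);

        WORKERS.shutdown();
        WORKERS.awaitTermination(5, TimeUnit.SECONDS);
    }
}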
Use of org.h2.command.dml.Set in project ignite by apache.
Class GridReduceQueryExecutor, method stableDataNodes.
/**
 * @param isReplicatedOnly Whether the query must involve only replicated caches.
 * @param topVer Topology version.
 * @param cacheIds Participating cache IDs.
 * @param parts Partitions.
 * @return Data nodes, or {@code null} if repartitioning started and a retry is needed.
 */
private Map<ClusterNode, IntArray> stableDataNodes(boolean isReplicatedOnly, AffinityTopologyVersion topVer,
    List<Integer> cacheIds, int[] parts) {
    GridCacheContext<?, ?> cctx = cacheContext(cacheIds.get(0));

    Map<ClusterNode, IntArray> map = stableDataNodesMap(topVer, cctx, parts);

    Set<ClusterNode> nodes = map.keySet();

    if (F.isEmpty(map))
        throw new CacheException("Failed to find data nodes for cache: " + cctx.name());

    for (int i = 1; i < cacheIds.size(); i++) {
        GridCacheContext<?, ?> extraCctx = cacheContext(cacheIds.get(i));

        String extraCacheName = extraCctx.name();

        if (extraCctx.isLocal())
            continue; // No consistency guarantees for local caches.

        if (isReplicatedOnly && !extraCctx.isReplicated())
            throw new CacheException("Queries running on replicated cache should not contain JOINs " +
                "with partitioned tables [replicatedCache=" + cctx.name() +
                ", partitionedCache=" + extraCacheName + "]");

        Set<ClusterNode> extraNodes = stableDataNodesMap(topVer, extraCctx, parts).keySet();

        if (F.isEmpty(extraNodes))
            throw new CacheException("Failed to find data nodes for cache: " + extraCacheName);

        boolean disjoint;

        if (extraCctx.isReplicated()) {
            if (isReplicatedOnly) {
                nodes.retainAll(extraNodes);

                disjoint = map.isEmpty();
            }
            else
                disjoint = !extraNodes.containsAll(nodes);
        }
        else
            disjoint = !extraNodes.equals(nodes);

        if (disjoint) {
            if (isPreloadingActive(cacheIds))
                return null; // Retry.
            else
                throw new CacheException("Caches have distinct sets of data nodes [cache1=" + cctx.name() +
                    ", cache2=" + extraCacheName + "]");
        }
    }

    return map;
}
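One subtlety above: nodes comes from map.keySet(), which in Java is a live view rather than a copy, so nodes.retainAll(extraNodes) shrinks map itself, and the replicated-only branch can then test map.isEmpty(). A minimal demonstration of this key-set-view behavior:

import java.util.*;

public class KeySetView {
    public static void main(String[] args) {
        Map<String, Integer> map = new HashMap<>();

        map.put("A", 1);
        map.put("B", 2);

        Set<String> nodes = map.keySet(); // Live view, not a copy.

        nodes.retainAll(Collections.singleton("A")); // Also removes the entry for "B" from the map.

        System.out.println(map); // Prints {A=1}.
    }
}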
Use of org.h2.command.dml.Set in project ignite by apache.
Class GAGridFunction, method getChromosomes.
/**
 * Helper routine that returns 'pivoted' results for the provided query.
 *
 * @param query SQL query.
 * @return Result set.
 */
private static SimpleResultSet getChromosomes(String query) {
    Ignite ignite = Ignition.localIgnite();

    List<Chromosome> chromosomes = GAGridUtils.getChromosomes(ignite, query);

    SimpleResultSet rs2 = new SimpleResultSet();

    // Use the first chromosome to determine the gene count (assumes a non-empty result).
    Chromosome aChrom = chromosomes.get(0);

    int genesCount = aChrom.getGenes().length;

    rs2.addColumn("Chromosome Id", Types.INTEGER, 0, 0);
    rs2.addColumn("Fitness Score", Types.DOUBLE, 0, 0);

    for (int i = 0; i < genesCount; i++) {
        int columnIndex = i + 1;

        rs2.addColumn("Gene " + columnIndex, Types.VARCHAR, 0, 0);
    }

    for (Chromosome rowChrom : chromosomes) {
        Object[] row = new Object[genesCount + 2];

        row[0] = rowChrom.id();
        row[1] = rowChrom.getFitnessScore();

        List<Gene> genes = GAGridUtils.getGenesInOrderForChromosome(ignite, rowChrom);

        int i = 2;

        for (Gene gene : genes) {
            row[i] = gene.getValue().toString();

            i++;
        }

        // Add a row for an individual chromosome.
        rs2.addRow(row);
    }

    return rs2;
}
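SimpleResultSet is H2's helper for table functions: columns are declared up front and rows are appended one at a time, exactly as getChromosomes does. If such a static method is registered with H2, the result can be queried like a table. The sketch below registers a cut-down stand-in via CREATE ALIAS (an assumption for illustration; it is not how the GA grid wires up its functions):

import java.sql.*;

public class TableFunctionDemo {
    /** Hypothetical table function mirroring getChromosomes' use of SimpleResultSet. */
    public static ResultSet chromosomes() {
        org.h2.tools.SimpleResultSet rs = new org.h2.tools.SimpleResultSet();

        rs.addColumn("CHROMOSOME_ID", Types.INTEGER, 0, 0);
        rs.addColumn("FITNESS_SCORE", Types.DOUBLE, 0, 0);

        rs.addRow(1, 0.92);
        rs.addRow(2, 0.87);

        return rs;
    }

    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo");
            Statement st = conn.createStatement()) {
            st.execute("CREATE ALIAS CHROMOSOMES FOR \"TableFunctionDemo.chromosomes\"");

            try (ResultSet rs = st.executeQuery("SELECT * FROM CHROMOSOMES()")) {
                while (rs.next())
                    System.out.println(rs.getInt(1) + " -> " + rs.getDouble(2));
            }
        }
    }
}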
Use of org.h2.command.dml.Set in project ignite by apache.
Class DmlAstUtils, method selectForDelete.
/**
 * Generates a SQL SELECT based on the DELETE's WHERE, LIMIT, etc.
 *
 * @param del Delete statement.
 * @param keysParamIdx Index of the bind parameter carrying the keys to filter by, or {@code null} if no keys filter is needed.
 * @return SELECT statement.
 */
public static GridSqlSelect selectForDelete(GridSqlDelete del, @Nullable Integer keysParamIdx) {
    GridSqlSelect mapQry = new GridSqlSelect();

    mapQry.from(del.from());

    Set<GridSqlTable> tbls = new HashSet<>();

    collectAllGridTablesInTarget(del.from(), tbls);

    assert tbls.size() == 1 : "Failed to determine target table for DELETE";

    GridSqlTable tbl = tbls.iterator().next();

    GridH2Table gridTbl = tbl.dataTable();

    assert gridTbl != null : "Failed to determine target grid table for DELETE";

    Column h2KeyCol = gridTbl.getColumn(GridH2KeyValueRowOnheap.KEY_COL);
    Column h2ValCol = gridTbl.getColumn(GridH2KeyValueRowOnheap.VAL_COL);

    GridSqlColumn keyCol = new GridSqlColumn(h2KeyCol, tbl, h2KeyCol.getName());
    keyCol.resultType(GridSqlType.fromColumn(h2KeyCol));

    GridSqlColumn valCol = new GridSqlColumn(h2ValCol, tbl, h2ValCol.getName());
    valCol.resultType(GridSqlType.fromColumn(h2ValCol));

    mapQry.addColumn(keyCol, true);
    mapQry.addColumn(valCol, true);

    GridSqlElement where = del.where();

    if (keysParamIdx != null)
        where = injectKeysFilterParam(where, keyCol, keysParamIdx);

    mapQry.where(where);
    mapQry.limit(del.limit());

    return mapQry;
}
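To make the rewrite concrete: a statement such as DELETE FROM person WHERE age > 50 becomes a map query that selects the table's hidden key and value columns under the same WHERE and LIMIT, conceptually SELECT _KEY, _VAL FROM person WHERE age > 50. A string-level sketch of that transformation (illustration only; the real code above operates on the GridSql AST, not on SQL text):

public class DeleteToSelect {
    /** Rewrites "DELETE FROM t ..." into "SELECT _KEY, _VAL FROM t ..." at the string level. */
    static String selectForDelete(String deleteSql) {
        String prefix = "DELETE FROM ";

        if (!deleteSql.toUpperCase().startsWith(prefix))
            throw new IllegalArgumentException(deleteSql);

        // Keep the FROM/WHERE/LIMIT tail, but select the hidden key and value columns instead.
        return "SELECT _KEY, _VAL FROM " + deleteSql.substring(prefix.length());
    }

    public static void main(String[] args) {
        System.out.println(selectForDelete("DELETE FROM person WHERE age > 50"));
        // Prints: SELECT _KEY, _VAL FROM person WHERE age > 50
    }
}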