use of org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery in project ignite by apache.
the class GridMapQueryExecutor method onQueryRequest0.
/**
* @param node Node that authored the request.
* @param reqId Request ID.
* @param segmentId Index segment ID.
* @param schemaName Schema name.
* @param qrys Queries to execute.
* @param cacheIds Caches which will be affected by these queries.
* @param topVer Topology version.
* @param partsMap Partitions map for unstable topology.
* @param parts Explicit partitions for current node.
* @param tbls Tables.
* @param pageSize Page size.
* @param distributedJoinMode Query distributed join mode.
* @param enforceJoinOrder Enforce join order of tables.
* @param replicated Replicated only flag.
* @param timeout Query timeout.
* @param params Query parameters.
*/
private void onQueryRequest0(ClusterNode node, long reqId, int segmentId, String schemaName,
    Collection<GridCacheSqlQuery> qrys, List<Integer> cacheIds, AffinityTopologyVersion topVer,
    Map<UUID, int[]> partsMap, int[] parts, Collection<QueryTable> tbls, int pageSize,
    DistributedJoinMode distributedJoinMode, boolean enforceJoinOrder, boolean replicated,
    int timeout, Object[] params) {
// Prepare to run queries.
GridCacheContext<?, ?> mainCctx = !F.isEmpty(cacheIds) ? ctx.cache().context().cacheContext(cacheIds.get(0)) : null;
NodeResults nodeRess = resultsForNode(node.id());
QueryResults qr = null;
List<GridReservable> reserved = new ArrayList<>();
try {
if (topVer != null) {
// Reserve primary for topology version or explicit partitions.
if (!reservePartitions(cacheIds, topVer, parts, reserved)) {
sendRetry(node, reqId, segmentId);
return;
}
}
qr = new QueryResults(reqId, qrys.size(), mainCctx != null ? mainCctx.name() : null);
if (nodeRess.put(reqId, segmentId, qr) != null)
throw new IllegalStateException();
// Prepare query context.
GridH2QueryContext qctx = new GridH2QueryContext(ctx.localNodeId(), node.id(), reqId, segmentId,
    replicated ? REPLICATED : MAP)
    .filter(h2.backupFilter(topVer, parts))
    .partitionsMap(partsMap)
    .distributedJoinMode(distributedJoinMode)
    .pageSize(pageSize)
    .topologyVersion(topVer)
    .reservations(reserved);
List<GridH2Table> snapshotedTbls = null;
if (!F.isEmpty(tbls)) {
snapshotedTbls = new ArrayList<>(tbls.size());
for (QueryTable tbl : tbls) {
GridH2Table h2Tbl = h2.dataTable(tbl);
Objects.requireNonNull(h2Tbl, tbl.toString());
h2Tbl.snapshotIndexes(qctx, segmentId);
snapshotedTbls.add(h2Tbl);
}
}
Connection conn = h2.connectionForSchema(schemaName);
H2Utils.setupConnection(conn, distributedJoinMode != OFF, enforceJoinOrder);
GridH2QueryContext.set(qctx);
// qctx is set, so from now on the reservations are released through it.
reserved = null;
try {
if (nodeRess.cancelled(reqId)) {
GridH2QueryContext.clear(ctx.localNodeId(), node.id(), reqId, qctx.type());
nodeRess.cancelRequest(reqId);
throw new QueryCancelledException();
}
// Run queries.
int qryIdx = 0;
boolean evt = mainCctx != null && ctx.event().isRecordable(EVT_CACHE_QUERY_EXECUTED);
for (GridCacheSqlQuery qry : qrys) {
ResultSet rs = null;
// If we are not the target node for this replicated query, just ignore it.
if (qry.node() == null || (segmentId == 0 && qry.node().equals(ctx.localNodeId()))) {
rs = h2.executeSqlQueryWithTimer(conn, qry.query(), F.asList(qry.parameters(params)), true, timeout, qr.cancels[qryIdx]);
if (evt) {
assert mainCctx != null;
ctx.event().record(new CacheQueryExecutedEvent<>(node, "SQL query executed.",
    EVT_CACHE_QUERY_EXECUTED, CacheQueryType.SQL.name(), mainCctx.name(), null,
    qry.query(), null, null, params, node.id(), null));
}
assert rs instanceof JdbcResultSet : rs.getClass();
}
qr.addResult(qryIdx, qry, node.id(), rs, params);
if (qr.canceled) {
qr.result(qryIdx).close();
throw new QueryCancelledException();
}
// Send the first page.
sendNextPage(nodeRess, node, qr, qryIdx, segmentId, pageSize);
qryIdx++;
}
} finally {
GridH2QueryContext.clearThreadLocal();
if (distributedJoinMode == OFF)
qctx.clearContext(false);
if (!F.isEmpty(snapshotedTbls)) {
for (GridH2Table dataTbl : snapshotedTbls) dataTbl.releaseSnapshots();
}
}
} catch (Throwable e) {
if (qr != null) {
nodeRess.remove(reqId, segmentId, qr);
qr.cancel(false);
}
if (X.hasCause(e, GridH2RetryException.class))
sendRetry(node, reqId, segmentId);
else {
U.error(log, "Failed to execute local query.", e);
sendError(node, reqId, e);
if (e instanceof Error)
throw (Error) e;
}
} finally {
if (reserved != null) {
// Release reserved partitions.
for (int i = 0; i < reserved.size(); i++) reserved.get(i).release();
}
}
}
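The subtle point in the method above is reservation ownership: partitions are reserved into the local reserved list, but once GridH2QueryContext.set(qctx) is called the context owns them, so the variable is nulled and the outer finally block no longer releases anything. The following is a minimal, self-contained sketch of that hand-off idiom; Reservable and QueryContext here are hypothetical stand-ins for illustration, not Ignite API.

import java.util.ArrayList;
import java.util.List;

/** Hypothetical stand-in for GridReservable. */
interface Reservable {
    boolean reserve();
    void release();
}

final class ReservationDemo {
    /** Context that takes ownership of reservations, loosely mirroring GridH2QueryContext. */
    static final class QueryContext {
        private final List<Reservable> owned;

        QueryContext(List<Reservable> owned) { this.owned = owned; }

        void close() {
            for (Reservable r : owned)
                r.release();
        }
    }

    static void runQuery(List<Reservable> partitions) {
        List<Reservable> reserved = new ArrayList<>();

        try {
            // Reserve all-or-nothing, as reservePartitions() does.
            for (Reservable p : partitions) {
                if (!p.reserve())
                    return; // The real code would send a retry message here.

                reserved.add(p);
            }

            QueryContext qctx = new QueryContext(reserved);

            // Ownership transferred: the outer finally must not release anymore.
            reserved = null;

            try {
                // ... execute queries against the reserved partitions ...
            }
            finally {
                qctx.close(); // The context releases what it owns.
            }
        }
        finally {
            if (reserved != null) {
                // Reservation failed or ownership was never transferred: roll back.
                for (Reservable r : reserved)
                    r.release();
            }
        }
    }
}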
use of org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery in project ignite by apache.
the class GridSqlQuerySplitter method split.
/**
* @param stmt Prepared statement.
* @param params Parameters.
* @param collocatedGrpBy Whether the query has collocated GROUP BY keys.
* @param distributedJoins Whether distributed joins are enabled.
* @param enforceJoinOrder Enforce join order.
* @param h2 Indexing.
* @return Two step query.
* @throws SQLException If failed.
* @throws IgniteCheckedException If failed.
*/
public static GridCacheTwoStepQuery split(JdbcPreparedStatement stmt, Object[] params,
    boolean collocatedGrpBy, boolean distributedJoins, boolean enforceJoinOrder, IgniteH2Indexing h2)
    throws SQLException, IgniteCheckedException {
if (params == null)
params = GridCacheSqlQuery.EMPTY_PARAMS;
// Here we will just do initial query parsing. Do not use optimized
// subqueries because we do not have unique FROM aliases yet.
GridSqlQuery qry = parse(prepared(stmt), false);
String originalSql = qry.getSQL();
final boolean explain = qry.explain();
qry.explain(false);
GridSqlQuerySplitter splitter = new GridSqlQuerySplitter(params, collocatedGrpBy, h2.kernalContext());
// Normalization will generate unique aliases for all the table filters in FROM.
// Also it will collect all tables and schemas from the query.
splitter.normalizeQuery(qry);
Connection conn = stmt.getConnection();
// Here we will have correct normalized AST with optimized join order.
// The distributedJoins parameter is ignored because it is not relevant for
// the REDUCE query optimization.
qry = parse(optimize(h2, conn, qry.getSQL(), params, false, enforceJoinOrder), true);
// Do the actual query split. We will update the original query AST, so we need to be careful.
splitter.splitQuery(qry);
// We must have at least one map query.
assert !F.isEmpty(splitter.mapSqlQrys) : "map";
// We must have a reduce query.
assert splitter.rdcSqlQry != null : "rdc";
// If we have distributed joins, then we have to optimize all MAP side queries
// to have a correct join order with respect to batched joins and check if we need
// distributed joins at all.
if (distributedJoins) {
boolean allCollocated = true;
for (GridCacheSqlQuery mapSqlQry : splitter.mapSqlQrys) {
Prepared prepared = optimize(h2, conn, mapSqlQry.query(), mapSqlQry.parameters(params), true, enforceJoinOrder);
allCollocated &= isCollocated((Query) prepared);
mapSqlQry.query(parse(prepared, true).getSQL());
}
// We do not need distributed joins if all MAP queries are collocated.
if (allCollocated)
distributedJoins = false;
}
// Set up the resulting two-step query and return it.
GridCacheTwoStepQuery twoStepQry = new GridCacheTwoStepQuery(originalSql, splitter.tbls);
twoStepQry.reduceQuery(splitter.rdcSqlQry);
for (GridCacheSqlQuery mapSqlQry : splitter.mapSqlQrys) twoStepQry.addMapQuery(mapSqlQry);
twoStepQry.skipMergeTable(splitter.rdcQrySimple);
twoStepQry.explain(explain);
twoStepQry.distributedJoins(distributedJoins);
// All map queries must have non-empty derivedPartitions to use this feature.
twoStepQry.derivedPartitions(mergePartitionsFromMultipleQueries(twoStepQry.mapQueries()));
return twoStepQry;
}
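From the caller's side the split is transparent: a SqlFieldsQuery over a partitioned cache goes through the parse, normalize, optimize and split steps above before any node executes it. Below is a hedged usage sketch built only on the public API; it assumes a running node and an existing queryable cache named "Person", which are not part of the snippet above.

import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class SplitDemo {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Long, Object> cache = ignite.cache("Person");

            // Behind the scenes this SELECT is parsed and broken into
            // per-node MAP queries plus one REDUCE query, as done by
            // GridSqlQuerySplitter.split().
            SqlFieldsQuery qry = new SqlFieldsQuery(
                "SELECT name, COUNT(*) FROM Person GROUP BY name")
                // Mirrors the distributedJoins flag, which split() may reset
                // to false when every MAP query turns out to be collocated.
                .setDistributedJoins(true)
                .setEnforceJoinOrder(false);

            List<List<?>> rows = cache.query(qry).getAll();

            for (List<?> row : rows)
                System.out.println(row);
        }
    }
}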
use of org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery in project ignite by apache.
the class GridMapQueryExecutor method onQueryRequest0.
/**
* @param node Node that authored the request.
* @param reqId Request ID.
* @param segmentId Index segment ID.
* @param schemaName Schema name.
* @param qrys Queries to execute.
* @param cacheIds Caches which will be affected by these queries.
* @param topVer Topology version.
* @param partsMap Partitions map for unstable topology.
* @param parts Explicit partitions for current node.
* @param pageSize Page size.
* @param distributedJoinMode Query distributed join mode.
* @param enforceJoinOrder Enforce join order of tables.
* @param replicated Replicated only flag.
* @param timeout Query timeout.
* @param params Query parameters.
* @param lazy Lazy (streaming) execution flag.
*/
private void onQueryRequest0(final ClusterNode node, final long reqId, final int segmentId,
    final String schemaName, final Collection<GridCacheSqlQuery> qrys, final List<Integer> cacheIds,
    final AffinityTopologyVersion topVer, final Map<UUID, int[]> partsMap, final int[] parts,
    final int pageSize, final DistributedJoinMode distributedJoinMode, final boolean enforceJoinOrder,
    final boolean replicated, final int timeout, final Object[] params, boolean lazy) {
if (lazy && MapQueryLazyWorker.currentWorker() == null) {
// Lazy queries must be re-submitted to dedicated workers.
MapQueryLazyWorkerKey key = new MapQueryLazyWorkerKey(node.id(), reqId, segmentId);
MapQueryLazyWorker worker = new MapQueryLazyWorker(ctx.igniteInstanceName(), key, log, this);
worker.submit(new Runnable() {
@Override
public void run() {
onQueryRequest0(node, reqId, segmentId, schemaName, qrys, cacheIds, topVer, partsMap, parts, pageSize, distributedJoinMode, enforceJoinOrder, replicated, timeout, params, true);
}
});
if (lazyWorkerBusyLock.enterBusy()) {
try {
MapQueryLazyWorker oldWorker = lazyWorkers.put(key, worker);
if (oldWorker != null)
oldWorker.stop();
IgniteThread thread = new IgniteThread(worker);
thread.start();
} finally {
lazyWorkerBusyLock.leaveBusy();
}
} else
log.info("Ignored query request (node is stopping) [nodeId=" + node.id() + ", reqId=" + reqId + ']');
return;
}
// Prepare to run queries.
GridCacheContext<?, ?> mainCctx = !F.isEmpty(cacheIds) ? ctx.cache().context().cacheContext(cacheIds.get(0)) : null;
MapNodeResults nodeRess = resultsForNode(node.id());
MapQueryResults qr = null;
List<GridReservable> reserved = new ArrayList<>();
try {
if (topVer != null) {
// Reserve primary for topology version or explicit partitions.
if (!reservePartitions(cacheIds, topVer, parts, reserved)) {
// Unregister the lazy worker because a retry may never reach this node again.
if (lazy)
stopAndUnregisterCurrentLazyWorker();
sendRetry(node, reqId, segmentId);
return;
}
}
qr = new MapQueryResults(h2, reqId, qrys.size(), mainCctx, MapQueryLazyWorker.currentWorker());
if (nodeRess.put(reqId, segmentId, qr) != null)
throw new IllegalStateException();
// Prepare query context.
GridH2QueryContext qctx = new GridH2QueryContext(ctx.localNodeId(), node.id(), reqId, segmentId,
    replicated ? REPLICATED : MAP)
    .filter(h2.backupFilter(topVer, parts))
    .partitionsMap(partsMap)
    .distributedJoinMode(distributedJoinMode)
    .pageSize(pageSize)
    .topologyVersion(topVer)
    .reservations(reserved);
Connection conn = h2.connectionForSchema(schemaName);
H2Utils.setupConnection(conn, distributedJoinMode != OFF, enforceJoinOrder);
GridH2QueryContext.set(qctx);
// qctx is set, so from now on the reservations are released through it.
reserved = null;
try {
if (nodeRess.cancelled(reqId)) {
GridH2QueryContext.clear(ctx.localNodeId(), node.id(), reqId, qctx.type());
nodeRess.cancelRequest(reqId);
throw new QueryCancelledException();
}
// Run queries.
int qryIdx = 0;
boolean evt = mainCctx != null && mainCctx.events().isRecordable(EVT_CACHE_QUERY_EXECUTED);
for (GridCacheSqlQuery qry : qrys) {
ResultSet rs = null;
// If we are not the target node for this replicated query, just ignore it.
if (qry.node() == null || (segmentId == 0 && qry.node().equals(ctx.localNodeId()))) {
rs = h2.executeSqlQueryWithTimer(conn, qry.query(), F.asList(qry.parameters(params)), true, timeout, qr.queryCancel(qryIdx));
if (evt) {
ctx.event().record(new CacheQueryExecutedEvent<>(node, "SQL query executed.",
    EVT_CACHE_QUERY_EXECUTED, CacheQueryType.SQL.name(), mainCctx.name(), null,
    qry.query(), null, null, params, node.id(), null));
}
assert rs instanceof JdbcResultSet : rs.getClass();
}
qr.addResult(qryIdx, qry, node.id(), rs, params);
if (qr.cancelled()) {
qr.result(qryIdx).close();
throw new QueryCancelledException();
}
// Send the first page.
sendNextPage(nodeRess, node, qr, qryIdx, segmentId, pageSize);
qryIdx++;
}
// All results of this request are already in memory in the result sets, so it is OK to release partitions.
if (!lazy)
releaseReservations();
} catch (Throwable e) {
releaseReservations();
throw e;
}
} catch (Throwable e) {
if (qr != null) {
nodeRess.remove(reqId, segmentId, qr);
qr.cancel(false);
}
// Unregister worker after possible cancellation.
if (lazy)
stopAndUnregisterCurrentLazyWorker();
if (X.hasCause(e, GridH2RetryException.class))
sendRetry(node, reqId, segmentId);
else {
U.error(log, "Failed to execute local query.", e);
sendError(node, reqId, e);
if (e instanceof Error)
throw (Error) e;
}
} finally {
if (reserved != null) {
// Release reserved partitions.
for (int i = 0; i < reserved.size(); i++) reserved.get(i).release();
}
}
}
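The first branch of this overload is a hand-off pattern: a lazy request arriving on a regular thread is re-submitted verbatim to a dedicated per-request worker, and the current thread returns at once. Here is a simplified sketch of the same pattern using plain java.util.concurrent primitives; the key format, the ON_WORKER flag and the missing shutdown logic are illustrative simplifications, not Ignite's actual MapQueryLazyWorker implementation.

import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

final class LazyDispatchDemo {
    /** One single-threaded executor per (node, request, segment) key. */
    private final Map<String, ExecutorService> workers = new ConcurrentHashMap<>();

    /** Marks threads that act as lazy workers. */
    private static final ThreadLocal<Boolean> ON_WORKER =
        ThreadLocal.withInitial(() -> Boolean.FALSE);

    void onQueryRequest(UUID nodeId, long reqId, int segmentId, boolean lazy) {
        if (lazy && !ON_WORKER.get()) {
            String key = nodeId + ":" + reqId + ":" + segmentId;

            ExecutorService worker = workers.computeIfAbsent(key,
                k -> Executors.newSingleThreadExecutor());

            // Re-submit the very same request to the dedicated worker.
            worker.submit(() -> {
                ON_WORKER.set(Boolean.TRUE);

                try {
                    onQueryRequest(nodeId, reqId, segmentId, true);
                }
                finally {
                    ON_WORKER.set(Boolean.FALSE);
                }
            });

            return; // The caller thread is done; the worker runs the query.
        }

        // ... actual query execution happens here, on the worker thread ...
    }
}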
use of org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery in project ignite by apache.
the class GridReduceQueryExecutor method explainPlan.
/**
* @param c Connection.
* @param qry Query.
* @param params Query parameters.
* @return Cursor for plans.
* @throws IgniteCheckedException If failed.
*/
private Iterator<List<?>> explainPlan(H2PooledConnection c, GridCacheTwoStepQuery qry, Object[] params)
    throws IgniteCheckedException {
List<List<?>> lists = new ArrayList<>(qry.mapQueries().size() + 1);
for (int i = 0, mapQrys = qry.mapQueries().size(); i < mapQrys; i++) {
ResultSet rs = h2.executeSqlQueryWithTimer(c, "SELECT PLAN FROM " + mergeTableIdentifier(i), null, 0, null, null, null);
lists.add(F.asList(getPlan(rs)));
}
int tblIdx = 0;
for (GridCacheSqlQuery mapQry : qry.mapQueries()) {
ReduceTable tbl = createMergeTable(c, mapQry, false);
fakeTable(c, tblIdx++).innerTable(tbl);
}
GridCacheSqlQuery rdc = qry.reduceQuery();
ResultSet rs = h2.executeSqlQueryWithTimer(c, "EXPLAIN " + rdc.query(), F.asList(rdc.parameters(params)), 0, null, null, null);
lists.add(F.asList(getPlan(rs)));
return lists.iterator();
}
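For user code the MAP and REDUCE plans that explainPlan() assembles are reachable with a plain EXPLAIN statement through the public API. A short sketch, assuming a cache with a queryable Person type (an assumption, not part of the snippet above):

import java.util.List;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.query.SqlFieldsQuery;

class ExplainDemo {
    /** Prints the H2 plans for a two-step query: one per MAP query, then the REDUCE plan. */
    static void printPlans(IgniteCache<?, ?> cache) {
        List<List<?>> plans = cache.query(
            new SqlFieldsQuery("EXPLAIN SELECT name FROM Person WHERE age > ?")
                .setArgs(30)).getAll();

        for (List<?> row : plans)
            System.out.println(row.get(0));
    }
}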
use of org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery in project ignite by apache.
the class GridReduceQueryExecutor method createReduceQueryRun.
/**
* Query run factory method.
*
* @param conn H2 connection.
* @param mapQueries Map queries.
* @param nodes Target nodes.
* @param pageSize Page size.
* @param nodeToSegmentsCnt Number of index segments per node.
* @param skipMergeTbl Skip merge table flag.
* @param explain Explain query flag.
* @param dataPageScanEnabled DataPage scan enabled flag.
* @return Reduce query run.
*/
@NotNull
private ReduceQueryRun createReduceQueryRun(H2PooledConnection conn, List<GridCacheSqlQuery> mapQueries,
    Collection<ClusterNode> nodes, int pageSize, Map<ClusterNode, Integer> nodeToSegmentsCnt,
    boolean skipMergeTbl, boolean explain, Boolean dataPageScanEnabled) {
final ReduceQueryRun r = new ReduceQueryRun(mapQueries.size(), pageSize, dataPageScanEnabled);
int tblIdx = 0;
int replicatedQrysCnt = 0;
for (GridCacheSqlQuery mapQry : mapQueries) {
Reducer reducer;
if (skipMergeTbl)
reducer = UnsortedOneWayReducer.createDummy(ctx);
else {
ReduceTable tbl;
try {
tbl = createMergeTable(conn, mapQry, explain);
} catch (IgniteCheckedException e) {
throw new IgniteException(e);
}
reducer = tbl.getReducer();
fakeTable(conn, tblIdx++).innerTable(tbl);
}
// If the query has only replicated tables, we have to run it on a single node only.
if (!mapQry.isPartitioned()) {
ClusterNode node = F.rand(nodes);
mapQry.node(node.id());
replicatedQrysCnt++;
// Replicated tables can have only 1 segment.
reducer.setSources(singletonMap(node, 1));
} else
reducer.setSources(nodeToSegmentsCnt);
reducer.setPageSize(r.pageSize());
r.reducers().add(reducer);
}
int cnt = nodeToSegmentsCnt.values().stream().mapToInt(i -> i).sum();
r.init((r.reducers().size() - replicatedQrysCnt) * cnt + replicatedQrysCnt);
return r;
}
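The argument passed to r.init(...) is the number of result streams the reducers will consume: a partitioned MAP query fans out to every index segment on every node, while a replicated MAP query is pinned to a single node and contributes exactly one stream. A worked example of that arithmetic with hypothetical numbers:

/** Worked example of the stream count passed to ReduceQueryRun.init(...). */
class StreamCountDemo {
    public static void main(String[] args) {
        int reducers = 3;          // Total MAP queries, one reducer each.
        int replicatedQrysCnt = 1; // MAP queries over replicated tables only.
        int totalSegments = 8;     // Sum over nodeToSegmentsCnt.values(),
                                   // e.g. 4 nodes x 2 index segments.

        // Partitioned queries expect one stream per segment; replicated
        // queries expect a single stream each.
        int expected = (reducers - replicatedQrysCnt) * totalSegments
            + replicatedQrysCnt;

        System.out.println(expected); // 2 * 8 + 1 = 17
    }
}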