Use of org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery in project ignite by apache.
The class GridSqlQuerySplitter, method split.
/**
* @param conn Connection.
* @param prepared Prepared.
* @param params Parameters.
* @param collocatedGrpBy Whether the query has collocated GROUP BY keys.
* @param distributedJoins If distributed joins enabled.
* @param enforceJoinOrder Enforce join order.
* @param h2 Indexing.
* @return Two step query.
* @throws SQLException If failed.
* @throws IgniteCheckedException If failed.
*/
public static GridCacheTwoStepQuery split(Connection conn, Prepared prepared, Object[] params, boolean collocatedGrpBy, boolean distributedJoins, boolean enforceJoinOrder, IgniteH2Indexing h2) throws SQLException, IgniteCheckedException {
if (params == null)
params = GridCacheSqlQuery.EMPTY_PARAMS;
// Here we will just do initial query parsing. Do not use optimized
// subqueries because we do not have unique FROM aliases yet.
GridSqlQuery qry = parse(prepared, false);
String originalSql = qry.getSQL();
// debug("ORIGINAL", originalSql);
final boolean explain = qry.explain();
qry.explain(false);
GridSqlQuerySplitter splitter = new GridSqlQuerySplitter(params, collocatedGrpBy, h2.kernalContext());
// Normalization will generate unique aliases for all the table filters in FROM.
// Also it will collect all tables and schemas from the query.
splitter.normalizeQuery(qry);
// debug("NORMALIZED", qry.getSQL());
// Here we will have correct normalized AST with optimized join order.
// The distributedJoins parameter is ignored because it is not relevant for
// the REDUCE query optimization.
qry = parse(optimize(h2, conn, qry.getSQL(), params, false, enforceJoinOrder), true);
// Do the actual query split. We will update the original query AST, need to be careful.
splitter.splitQuery(qry);
// We must have at least one map query.
assert !F.isEmpty(splitter.mapSqlQrys) : "map";
// We must have a reduce query.
assert splitter.rdcSqlQry != null : "rdc";
// If we have distributed joins, then we have to optimize all MAP side queries
// to have a correct join order with respect to batched joins and check if we need
// distributed joins at all.
if (distributedJoins) {
boolean allCollocated = true;
for (GridCacheSqlQuery mapSqlQry : splitter.mapSqlQrys) {
Prepared prepared0 = optimize(h2, conn, mapSqlQry.query(), mapSqlQry.parameters(params), true, enforceJoinOrder);
allCollocated &= isCollocated((Query) prepared0);
mapSqlQry.query(parse(prepared0, true).getSQL());
}
// We do not need distributed joins if all MAP queries are collocated.
if (allCollocated)
distributedJoins = false;
}
// Setup resulting two step query and return it.
GridCacheTwoStepQuery twoStepQry = new GridCacheTwoStepQuery(originalSql, splitter.tbls);
twoStepQry.reduceQuery(splitter.rdcSqlQry);
for (GridCacheSqlQuery mapSqlQry : splitter.mapSqlQrys) twoStepQry.addMapQuery(mapSqlQry);
twoStepQry.skipMergeTable(splitter.rdcQrySimple);
twoStepQry.explain(explain);
twoStepQry.distributedJoins(distributedJoins);
// All map queries must have non-empty derivedPartitions to use this feature.
twoStepQry.derivedPartitions(mergePartitionsFromMultipleQueries(twoStepQry.mapQueries()));
return twoStepQry;
}
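For context, a minimal caller sketch for the method above (hypothetical wiring: the H2 connection, the parsed Prepared statement, and the IgniteH2Indexing facade are assumed to be supplied by the surrounding indexing code; only members shown in these listings are used):

static GridCacheTwoStepQuery splitExample(Connection conn, Prepared prepared, IgniteH2Indexing h2) throws SQLException, IgniteCheckedException {
// Passing null parameters falls back to GridCacheSqlQuery.EMPTY_PARAMS inside split().
GridCacheTwoStepQuery twoStepQry = GridSqlQuerySplitter.split(conn, prepared, null, /* collocatedGrpBy */ false, /* distributedJoins */ true, /* enforceJoinOrder */ false, h2);
// The split yields exactly one REDUCE query and at least one MAP query (see the asserts above).
System.out.println("REDUCE: " + twoStepQry.reduceQuery().query());
for (GridCacheSqlQuery mapQry : twoStepQry.mapQueries())
System.out.println("MAP: " + mapQry.query());
return twoStepQry;
}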
Use of org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery in project ignite by apache.
The class GridReduceQueryExecutor, method query.
/**
* @param qryId Query ID.
* @param schemaName Schema name.
* @param qry Query.
* @param keepBinary Keep binary.
* @param enforceJoinOrder Enforce join order of tables.
* @param timeoutMillis Timeout in milliseconds.
* @param cancel Query cancel.
* @param params Query parameters.
* @param parts Partitions.
* @param lazy Lazy execution flag.
* @param mvccTracker Query tracker.
* @param dataPageScanEnabled If data page scan is enabled.
* @param pageSize Page size.
* @return Rows iterator.
*/
@SuppressWarnings("IfMayBeConditional")
public Iterator<List<?>> query(long qryId, String schemaName, final GridCacheTwoStepQuery qry, boolean keepBinary, boolean enforceJoinOrder, int timeoutMillis, GridQueryCancel cancel, Object[] params, int[] parts, boolean lazy, MvccQueryTracker mvccTracker, Boolean dataPageScanEnabled, int pageSize) {
assert !qry.mvccEnabled() || mvccTracker != null;
if (pageSize <= 0)
pageSize = Query.DFLT_PAGE_SIZE;
// If explicit partitions are set, but there are no real tables, ignore.
if (!qry.hasCacheIds() && parts != null)
parts = null;
// Partitions are not supported for queries over all replicated caches.
if (parts != null && qry.isReplicatedOnly())
throw new CacheException("Partitions are not supported for replicated caches");
try {
if (qry.mvccEnabled())
checkActive(tx(ctx));
} catch (IgniteTxAlreadyCompletedCheckedException e) {
throw new TransactionAlreadyCompletedException(e.getMessage(), e);
}
final boolean singlePartMode = parts != null && parts.length == 1;
if (F.isEmpty(params))
params = EMPTY_PARAMS;
List<Integer> cacheIds = qry.cacheIds();
List<GridCacheSqlQuery> mapQueries = prepareMapQueries(qry, params, singlePartMode);
final boolean skipMergeTbl = !qry.explain() && qry.skipMergeTable() || singlePartMode;
final long retryTimeout = retryTimeout(timeoutMillis);
final long qryStartTime = U.currentTimeMillis();
ReduceQueryRun lastRun = null;
for (int attempt = 0; ; attempt++) {
ensureQueryNotCancelled(cancel);
if (attempt > 0) {
throttleOnRetry(lastRun, qryStartTime, retryTimeout, attempt);
ensureQueryNotCancelled(cancel);
}
AffinityTopologyVersion topVer = h2.readyTopologyVersion();
// Check if topology has changed while retrying on locked topology.
if (h2.serverTopologyChanged(topVer) && ctx.cache().context().lockedTopologyVersion(null) != null) {
throw new CacheException(new TransactionException("Server topology is changed during query " + "execution inside a transaction. It's recommended to rollback and retry transaction."));
}
ReducePartitionMapResult mapping = createMapping(qry, parts, cacheIds, topVer);
if (mapping == null) // Can't map query.
continue; // Retry.
final Collection<ClusterNode> nodes = mapping.nodes();
final Map<ClusterNode, Integer> nodeToSegmentsCnt = createNodeToSegmentsCountMapping(qry, mapping);
assert !F.isEmpty(nodes);
H2PooledConnection conn = h2.connections().connection(schemaName);
final long qryReqId = qryReqIdGen.incrementAndGet();
h2.runningQueryManager().trackRequestId(qryReqId);
boolean release = true;
try {
final ReduceQueryRun r = createReduceQueryRun(conn, mapQueries, nodes, pageSize, nodeToSegmentsCnt, skipMergeTbl, qry.explain(), dataPageScanEnabled);
runs.put(qryReqId, r);
try {
cancel.add(() -> send(nodes, new GridQueryCancelRequest(qryReqId), null, true));
GridH2QueryRequest req = new GridH2QueryRequest()
.queryId(qryId)
.requestId(qryReqId)
.topologyVersion(topVer)
.pageSize(pageSize)
.caches(qry.cacheIds())
.tables(qry.distributedJoins() ? qry.tables() : null)
.partitions(convert(mapping.partitionsMap()))
.queries(mapQueries)
.parameters(params)
.flags(queryFlags(qry, enforceJoinOrder, lazy, dataPageScanEnabled))
.timeout(timeoutMillis)
.explicitTimeout(true)
.schemaName(schemaName);
if (mvccTracker != null)
req.mvccSnapshot(mvccTracker.snapshot());
final C2<ClusterNode, Message, Message> spec = parts == null ? null : new ReducePartitionsSpecializer(mapping.queryPartitionsMap());
boolean retry = false;
if (send(nodes, req, spec, false)) {
awaitAllReplies(r, nodes, cancel);
if (r.hasErrorOrRetry()) {
CacheException err = r.exception();
if (err != null) {
if (err.getCause() instanceof IgniteClientDisconnectedException)
throw err;
else if (QueryUtils.wasCancelled(err))
throw new QueryCancelledException(); // Throw correct exception.
throw err;
}
// If remote node asks us to retry then we have outdated full partition map.
h2.awaitForReadyTopologyVersion(r.retryTopologyVersion());
retry = true;
}
} else
retry = true;
if (retry) {
lastRun = runs.get(qryReqId);
assert lastRun != null;
// Retry.
continue;
}
Iterator<List<?>> resIter;
if (skipMergeTbl) {
resIter = new ReduceIndexIterator(this, nodes, r, qryReqId, qry.distributedJoins(), mvccTracker, ctx.tracing());
release = false;
U.close(conn, log);
} else {
ensureQueryNotCancelled(cancel);
QueryContext qctx = new QueryContext(0, null, null, null, null, true);
H2Utils.setupConnection(conn, qctx, false, enforceJoinOrder);
if (qry.explain())
return explainPlan(conn, qry, params);
GridCacheSqlQuery rdc = qry.reduceQuery();
final PreparedStatement stmt = conn.prepareStatementNoCache(rdc.query());
H2Utils.bindParameters(stmt, F.asList(rdc.parameters(params)));
ReduceH2QueryInfo qryInfo = new ReduceH2QueryInfo(stmt, qry.originalSql(), ctx.localNodeId(), qryId, qryReqId);
ResultSet res = h2.executeSqlQueryWithTimer(stmt, conn, rdc.query(), timeoutMillis, cancel, dataPageScanEnabled, qryInfo);
resIter = new H2FieldsIterator(res, mvccTracker, conn, r.pageSize(), log, h2, qryInfo, ctx.tracing());
conn = null; // To prevent close in the finally block below.
mvccTracker = null; // To prevent the tracker callback inside the finally block.
}
return new GridQueryCacheObjectsIterator(resIter, h2.objectContext(), keepBinary);
} catch (IgniteCheckedException | RuntimeException e) {
release = true;
if (e instanceof CacheException) {
if (QueryUtils.wasCancelled(e))
throw new CacheException("Failed to run reduce query locally.", new QueryCancelledException());
throw (CacheException) e;
}
Throwable cause = e;
if (e instanceof IgniteCheckedException) {
Throwable disconnectedErr = ((IgniteCheckedException) e).getCause(IgniteClientDisconnectedException.class);
if (disconnectedErr != null)
cause = disconnectedErr;
}
throw new CacheException("Failed to run reduce query locally. " + cause.getMessage(), cause);
} finally {
if (release) {
releaseRemoteResources(nodes, r, qryReqId, qry.distributedJoins(), mvccTracker);
if (!skipMergeTbl) {
// Drop all merge tables.
for (int i = 0, mapQrys = mapQueries.size(); i < mapQrys; i++)
fakeTable(null, i).innerTable(null);
}
}
}
} finally {
if (conn != null && release)
U.close(conn, log);
}
}
}
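A hedged usage sketch for this reduce-side entry point (argument values are illustrative; a null mvccTracker is only valid when the query has MVCC disabled, per the assert at the top, and pageSize <= 0 falls back to Query.DFLT_PAGE_SIZE):

static void reduceQueryExample(GridReduceQueryExecutor exec, GridCacheTwoStepQuery twoStepQry, Object[] params) {
// parts == null means no explicit partition restriction.
Iterator<List<?>> rows = exec.query(1L, "PUBLIC", twoStepQry, /* keepBinary */ true, /* enforceJoinOrder */ false, /* timeoutMillis */ 0, new GridQueryCancel(), params, /* parts */ null, /* lazy */ false, /* mvccTracker */ null, /* dataPageScanEnabled */ null, /* pageSize */ 0);
while (rows.hasNext())
System.out.println(rows.next());
}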
Use of org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery in project ignite by apache.
The class GridReduceQueryExecutor, method prepareMapQueryForSinglePartition.
/**
* Prepare map query based on original sql.
*
* @param qry Two step query.
* @param params Query parameters.
* @return Updated map query list with one map query.
*/
private List<GridCacheSqlQuery> prepareMapQueryForSinglePartition(GridCacheTwoStepQuery qry, Object[] params) {
boolean hasSubQries = false;
for (GridCacheSqlQuery mapQry : qry.mapQueries()) {
if (mapQry.hasSubQueries()) {
hasSubQries = true;
break;
}
}
GridCacheSqlQuery originalQry = new GridCacheSqlQuery(qry.originalSql());
if (!F.isEmpty(params)) {
int[] paramIdxs = new int[params.length];
for (int i = 0; i < params.length; i++) paramIdxs[i] = i;
originalQry.parameterIndexes(paramIdxs);
}
originalQry.partitioned(true);
originalQry.hasSubQueries(hasSubQries);
return Collections.singletonList(originalQry);
}
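To illustrate the result shape: for a hypothetical two-parameter statement, the method builds a single MAP query equivalent to the following (the SQL string is made up for the example):

GridCacheSqlQuery singlePartQry = new GridCacheSqlQuery("SELECT * FROM T WHERE A = ? AND B = ?"); // hypothetical original SQL
singlePartQry.parameterIndexes(new int[] {0, 1}); // identity mapping: parameter i keeps index i
singlePartQry.partitioned(true); // the single-partition path always marks the query partitioned
singlePartQry.hasSubQueries(false); // true only if some original MAP query contained subqueries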
Use of org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery in project ignite by apache.
The class GridSqlQuerySplitter, method split0.
/**
* @param conn Connection.
* @param qry Query.
* @param originalSql Original SQL query string.
* @param collocatedGrpBy Whether the query has collocated GROUP BY keys.
* @param distributedJoins If distributed joins enabled.
* @param enforceJoinOrder Enforce join order.
* @param locSplit Whether this is a split for local query.
* @param idx Indexing.
* @param paramsCnt Parameters count.
* @return Two step query.
* @throws SQLException If failed.
* @throws IgniteCheckedException If failed.
*/
private static GridCacheTwoStepQuery split0(H2PooledConnection conn, GridSqlQuery qry, String originalSql, boolean collocatedGrpBy, boolean distributedJoins, boolean enforceJoinOrder, boolean locSplit, IgniteH2Indexing idx, int paramsCnt, IgniteLogger log) throws SQLException, IgniteCheckedException {
final boolean explain = qry.explain();
qry.explain(false);
GridSqlQuerySplitter splitter = new GridSqlQuerySplitter(paramsCnt, collocatedGrpBy, distributedJoins, locSplit, idx.partitionExtractor(), log);
// Normalization will generate unique aliases for all the table filters in FROM.
// Also it will collect all tables and schemas from the query.
splitter.normalizeQuery(qry);
// Here we will have correct normalized AST with optimized join order.
// The distributedJoins parameter is ignored because it is not relevant for
// the REDUCE query optimization.
qry = GridSqlQueryParser.parseQuery(prepare(conn, H2Utils.context(conn), qry.getSQL(), false, enforceJoinOrder), true, log);
// Do the actual query split. We will update the original query AST, need to be careful.
splitter.splitQuery(qry);
// We must have at least one map query.
assert !F.isEmpty(splitter.mapSqlQrys) : "map";
// We must have a reduce query.
assert splitter.rdcSqlQry != null : "rdc";
// If we have distributed joins, then we have to optimize all MAP side queries
// to have a correct join order with respect to batched joins and check if we need
// distributed joins at all.
if (distributedJoins) {
boolean allCollocated = true;
for (GridCacheSqlQuery mapSqlQry : splitter.mapSqlQrys) {
Prepared prepared0 = prepare(conn, H2Utils.context(conn), mapSqlQry.query(), true, enforceJoinOrder);
allCollocated &= isCollocated((Query) prepared0);
mapSqlQry.query(GridSqlQueryParser.parseQuery(prepared0, true, log).getSQL());
}
// We do not need distributed joins if all MAP queries are collocated.
if (allCollocated)
distributedJoins = false;
}
List<Integer> cacheIds = H2Utils.collectCacheIds(idx, null, splitter.tbls);
boolean mvccEnabled = H2Utils.collectMvccEnabled(idx, cacheIds);
boolean replicatedOnly = splitter.mapSqlQrys.stream().noneMatch(GridCacheSqlQuery::isPartitioned);
boolean treatReplicatedAsPartitioned = splitter.mapSqlQrys.stream().anyMatch(GridCacheSqlQuery::treatReplicatedAsPartitioned);
H2Utils.checkQuery(idx, cacheIds, splitter.tbls);
// Setup resulting two step query and return it.
return new GridCacheTwoStepQuery(originalSql, paramsCnt, splitter.tbls, splitter.rdcSqlQry, splitter.mapSqlQrys, splitter.skipMergeTbl, explain, distributedJoins, replicatedOnly, splitter.extractor.mergeMapQueries(splitter.mapSqlQrys), cacheIds, mvccEnabled, locSplit, treatReplicatedAsPartitioned);
}
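A self-contained toy model of how the two topology flags near the end are folded from the MAP query list; MapQry is a stand-in record for this sketch, not an Ignite type:

import java.util.List;

class TopologyFlagsExample {
record MapQry(boolean partitioned, boolean treatReplicatedAsPartitioned) {}

public static void main(String[] args) {
List<MapQry> mapQrys = List.of(new MapQry(false, false), new MapQry(true, false));
// replicatedOnly holds only when no MAP query touches a partitioned table.
boolean replicatedOnly = mapQrys.stream().noneMatch(MapQry::partitioned); // false: one query is partitioned
// A single MAP query requesting it is enough to treat replicated caches as partitioned.
boolean treatReplicatedAsPartitioned = mapQrys.stream().anyMatch(MapQry::treatReplicatedAsPartitioned); // false
System.out.println(replicatedOnly + " / " + treatReplicatedAsPartitioned);
}
}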
Use of org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery in project ignite by apache.
The class GridSqlQuerySplitter, method splitSelect.
/**
* !!! Notice that here we will modify the original query AST in this method.
*
* @param parent Parent AST element.
* @param childIdx Index of child select.
*/
private void splitSelect(GridSqlAst parent, int childIdx) throws IgniteCheckedException {
if (++splitId > 99)
throw new CacheException("Too complex query to process.");
final GridSqlSelect mapQry = parent.child(childIdx);
final int visibleCols = mapQry.visibleColumns();
List<GridSqlAst> rdcExps = new ArrayList<>(visibleCols);
List<GridSqlAst> mapExps = new ArrayList<>(mapQry.allColumns());
mapExps.addAll(mapQry.columns(false));
Set<String> colNames = new HashSet<>();
final int havingCol = mapQry.havingColumn();
boolean distinctAggregateFound = false;
if (!collocatedGrpBy) {
for (int i = 0, len = mapExps.size(); i < len; i++) distinctAggregateFound |= SplitterUtils.hasDistinctAggregates(mapExps.get(i));
}
boolean aggregateFound = distinctAggregateFound;
// Split all select expressions into map-reduce parts.
// Remember len because mapExps list can grow.
for (int i = 0, len = mapExps.size(); i < len; i++)
aggregateFound |= splitSelectExpression(mapExps, rdcExps, colNames, i, collocatedGrpBy, i == havingCol, distinctAggregateFound);
// We do not split aggregates when collocatedGrpBy is true.
assert !(collocatedGrpBy && aggregateFound);
// Create reduce query AST. Use unique merge table for this split.
GridSqlSelect rdcQry = new GridSqlSelect().from(mergeTable(splitId));
// -- SELECT
mapQry.clearColumns();
// Add all map expressions as visible.
for (GridSqlAst exp : mapExps)
mapQry.addColumn(exp, true);
// Add visible reduce columns.
for (int i = 0; i < visibleCols; i++)
rdcQry.addColumn(rdcExps.get(i), true);
// Add invisible reduce columns (HAVING).
for (int i = visibleCols; i < rdcExps.size(); i++)
rdcQry.addColumn(rdcExps.get(i), false);
// Add all extra map columns as invisible reduce columns.
for (int i = rdcExps.size(); i < mapExps.size(); i++)
rdcQry.addColumn(SplitterUtils.column(((GridSqlAlias) mapExps.get(i)).alias()), false);
// -- GROUP BY
if (mapQry.groupColumns() != null && !collocatedGrpBy) {
rdcQry.groupColumns(mapQry.groupColumns());
// Grouping with distinct aggregates cannot be performed on map phase
if (distinctAggregateFound)
mapQry.groupColumns(null);
}
// -- HAVING
if (havingCol >= 0 && !collocatedGrpBy) {
// We need to find HAVING column in reduce query.
for (int i = visibleCols; i < rdcQry.allColumns(); i++) {
GridSqlAst c = rdcQry.column(i);
if (c instanceof GridSqlAlias && HAVING_COLUMN.equals(((GridSqlAlias) c).alias())) {
rdcQry.havingColumn(i);
break;
}
}
mapQry.havingColumn(-1);
}
// -- ORDER BY
if (!mapQry.sort().isEmpty()) {
for (GridSqlSortColumn sortCol : mapQry.sort()) rdcQry.addSort(sortCol);
// If collocatedGrpBy is true, then aggregateFound is always false.
if (aggregateFound) // Ordering over aggregates does not make sense.
mapQry.clearSort(); // Otherwise map sort will be used by offset-limit.
}
// -- LIMIT
if (mapQry.limit() != null) {
rdcQry.limit(mapQry.limit());
// Will keep limits on map side when collocatedGrpBy is true,
// because in this case aggregateFound is always false.
if (aggregateFound)
mapQry.limit(null);
}
// -- OFFSET
if (mapQry.offset() != null) {
rdcQry.offset(mapQry.offset());
if (mapQry.limit() != null) // LIMIT off + lim
mapQry.limit(SplitterUtils.op(GridSqlOperationType.PLUS, mapQry.offset(), mapQry.limit()));
mapQry.offset(null);
}
// -- DISTINCT
if (mapQry.distinct()) {
mapQry.distinct(!aggregateFound && mapQry.groupColumns() == null && mapQry.havingColumn() < 0);
rdcQry.distinct(true);
}
// Replace the given select with generated reduce query in the parent.
parent.child(childIdx, rdcQry);
// Setup resulting map query.
GridCacheSqlQuery map = new GridCacheSqlQuery(mapQry.getSQL());
setupParameters(map, mapQry, paramsCnt);
SqlAstTraverser traverser = new SqlAstTraverser(mapQry, distributedJoins, log);
traverser.traverse();
map.columns(collectColumns(mapExps));
map.sortColumns(mapQry.sort());
map.partitioned(traverser.hasPartitionedTables());
map.hasSubQueries(traverser.hasSubQueries());
map.treatReplicatedAsPartitioned(traverser.hasOuterJoinReplicatedPartitioned() || traverser.hasReplicatedWithPartitionedAndSubQuery());
if (map.isPartitioned() && canExtractPartitions)
map.derivedPartitions(extractor.extract(mapQry));
mapSqlQrys.add(map);
}
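To make the column split concrete, a schematic example of how an aggregate SELECT is typically decomposed into MAP and REDUCE parts (illustrative SQL shapes for exposition, not the splitter's literal output):

String original = "SELECT k, AVG(v) FROM T GROUP BY k";
// MAP side, executed on every data node: AVG is decomposed into re-aggregatable parts.
String map = "SELECT k, SUM(v) AS s, COUNT(v) AS c FROM T GROUP BY k";
// REDUCE side, executed over the merge table that unions all MAP result pages.
String reduce = "SELECT k, SUM(s) / SUM(c) FROM merge_table GROUP BY k";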