Use of org.apache.calcite.rel.RelRoot in project druid by druid-io.
The class DruidPlanner, method plan().
/**
* Plan an SQL query for execution, returning a {@link PlannerResult} which can be used to actually execute the query.
*
* Ideally, the query can be planned into a native Druid query using {@link #planWithDruidConvention}, but it will
* fall back to {@link #planWithBindableConvention} if this is not possible.
*
* In some future this could perhaps re-use some of the work done by {@link #validate()}
* instead of repeating it, but that day is not today.
*/
public PlannerResult plan() throws SqlParseException, ValidationException, RelConversionException {
  resetPlanner();
  final ParsedNodes parsed = ParsedNodes.create(planner.parse(plannerContext.getSql()));
  try {
    if (parsed.getIngestionGranularity() != null) {
      plannerContext.getQueryContext().put(
          DruidSqlInsert.SQL_INSERT_SEGMENT_GRANULARITY,
          plannerContext.getJsonMapper().writeValueAsString(parsed.getIngestionGranularity())
      );
    }
  } catch (JsonProcessingException e) {
    throw new ValidationException("Unable to serialize partition granularity.");
  }
  // the planner's type factory is not available until after parsing
  this.rexBuilder = new RexBuilder(planner.getTypeFactory());
  final SqlNode parameterizedQueryNode = rewriteDynamicParameters(parsed.getQueryNode());
  final SqlNode validatedQueryNode = planner.validate(parameterizedQueryNode);
  final RelRoot rootQueryRel = planner.rel(validatedQueryNode);
  try {
    return planWithDruidConvention(rootQueryRel, parsed.getExplainNode(), parsed.getInsertNode());
  } catch (Exception e) {
    Throwable cannotPlanException = Throwables.getCauseOfType(e, RelOptPlanner.CannotPlanException.class);
    if (null == cannotPlanException) {
      // Not a CannotPlanException; rethrow without trying the bindable convention.
      throw e;
    }
    // If there is no ingestion clause, retry with the BINDABLE convention, and return without
    // error if the query is plannable by it.
    if (parsed.getInsertNode() == null) {
      // Used for querying Values and metadata tables.
      try {
        return planWithBindableConvention(rootQueryRel, parsed.getExplainNode());
      } catch (Exception e2) {
        e.addSuppressed(e2);
      }
    }
    Logger logger = log;
    if (!QueryContexts.isDebug(plannerContext.getQueryContext())) {
      logger = log.noStackTrace();
    }
    String errorMessage = buildSQLPlanningErrorMessage(cannotPlanException);
    logger.warn(e, errorMessage);
    throw new UnsupportedSQLQueryException(errorMessage);
  }
}
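The interesting part of plan() is its fallback structure: the Druid convention is tried first, and the bindable convention is attempted only when a CannotPlanException appears somewhere in the cause chain, with the fallback's own failure attached as a suppressed exception. Below is a minimal, self-contained sketch of that pattern. The names FallbackSketch, causeOfType, and planWithFallback are illustrative, not Druid API, and UnsupportedOperationException stands in for Calcite's CannotPlanException.

import java.util.function.Supplier;

public final class FallbackSketch {
  // Walks the cause chain looking for an exception of the given type; this is
  // the role Throwables.getCauseOfType plays in plan() above.
  static <T extends Throwable> T causeOfType(Throwable t, Class<T> type) {
    for (Throwable cur = t; cur != null; cur = cur.getCause()) {
      if (type.isInstance(cur)) {
        return type.cast(cur);
      }
    }
    return null;
  }

  // Try the preferred planner; fall back only when the failure is the one
  // specific "cannot plan" type, keeping both failures for diagnostics.
  static <R> R planWithFallback(Supplier<R> preferred, Supplier<R> fallback) {
    try {
      return preferred.get();
    } catch (RuntimeException e) {
      if (causeOfType(e, UnsupportedOperationException.class) == null) {
        throw e; // some other failure: rethrow unchanged
      }
      try {
        return fallback.get();
      } catch (RuntimeException e2) {
        e.addSuppressed(e2); // attach the fallback's failure to the original
        throw e;
      }
    }
  }

  public static void main(String[] args) {
    String plan = planWithFallback(
        () -> { throw new RuntimeException(new UnsupportedOperationException("cannot plan")); },
        () -> "bindable plan");
    System.out.println(plan); // prints "bindable plan"
  }
}

Attaching the fallback's failure via addSuppressed keeps both stack traces available when the combined exception is finally logged, which is exactly what plan() relies on when it logs e above.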
Use of org.apache.calcite.rel.RelRoot in project druid by druid-io.
The class DruidPlanner, method possiblyWrapRootWithOuterLimitFromContext().
/**
* This method wraps the root with a {@link LogicalSort} that applies a limit (no ordering change). If the outer rel
* is already a {@link Sort}, we can merge our outerLimit into it, similar to what is going on in
* {@link org.apache.druid.sql.calcite.rule.SortCollapseRule}.
*
* The {@link PlannerContext#CTX_SQL_OUTER_LIMIT} flag that controls this wrapping is meant for internal use only by
* the web console, allowing it to apply a limit to queries without rewriting the original SQL.
*
* @param root root node
* @return root node wrapped with a limiting logical sort if a limit is specified in the query context.
*/
@Nullable
private RelRoot possiblyWrapRootWithOuterLimitFromContext(RelRoot root) {
  Object outerLimitObj = plannerContext.getQueryContext().get(PlannerContext.CTX_SQL_OUTER_LIMIT);
  Long outerLimit = DimensionHandlerUtils.convertObjectToLong(outerLimitObj, true);
  if (outerLimit == null) {
    return root;
  }
  final LogicalSort newRootRel;
  if (root.rel instanceof Sort) {
    Sort sort = (Sort) root.rel;
    final OffsetLimit originalOffsetLimit = OffsetLimit.fromSort(sort);
    final OffsetLimit newOffsetLimit = originalOffsetLimit.andThen(new OffsetLimit(0, outerLimit));
    if (newOffsetLimit.equals(originalOffsetLimit)) {
      // nothing to do, don't bother to make a new sort
      return root;
    }
    newRootRel = LogicalSort.create(
        sort.getInput(),
        sort.collation,
        newOffsetLimit.getOffsetAsRexNode(rexBuilder),
        newOffsetLimit.getLimitAsRexNode(rexBuilder)
    );
  } else {
    newRootRel = LogicalSort.create(
        root.rel,
        root.collation,
        null,
        new OffsetLimit(0, outerLimit).getLimitAsRexNode(rexBuilder)
    );
  }
  return new RelRoot(newRootRel, root.validatedRowType, root.kind, root.fields, root.collation);
}
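The merge with an existing Sort relies on OffsetLimit.andThen composing two offset/limit pairs into one. A minimal sketch of the assumed composition semantics follows (this is not Druid's actual OffsetLimit class): applying (offset=o1, limit=l1) followed by (offset=o2, limit=l2) is equivalent to a single (offset=o1+o2, limit=min(max(l1-o2, 0), l2)), treating a null limit as unlimited.

public final class OffsetLimitSketch {
  final long offset;
  final Long limit; // null means "unlimited"

  OffsetLimitSketch(long offset, Long limit) {
    this.offset = offset;
    this.limit = limit;
  }

  // Compose: apply this offset/limit first, then the outer one.
  OffsetLimitSketch andThen(OffsetLimitSketch outer) {
    // Rows surviving the inner limit once the outer offset skips more of them.
    Long remaining = limit == null ? null : Math.max(limit - outer.offset, 0);
    Long newLimit;
    if (remaining == null) {
      newLimit = outer.limit;
    } else if (outer.limit == null) {
      newLimit = remaining;
    } else {
      newLimit = Math.min(remaining, outer.limit);
    }
    return new OffsetLimitSketch(offset + outer.offset, newLimit);
  }

  public static void main(String[] args) {
    OffsetLimitSketch inner = new OffsetLimitSketch(5, 100L);
    OffsetLimitSketch wrapped = inner.andThen(new OffsetLimitSketch(0, 10L));
    // Prints "offset=5 limit=10": the outer limit of 10 tightens the inner 100,
    // which is the case possiblyWrapRootWithOuterLimitFromContext handles.
    System.out.println("offset=" + wrapped.offset + " limit=" + wrapped.limit);
  }
}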
Use of org.apache.calcite.rel.RelRoot in project calcite by apache.
The class SqlToRelConverter, method convertMultisets().
private RelNode convertMultisets(final List<SqlNode> operands, Blackboard bb) {
  // NOTE: Wael 2/04/05: this implementation is not the most efficient in
  // terms of planning since it generates XOs that can be reduced.
  final List<Object> joinList = new ArrayList<>();
  List<SqlNode> lastList = new ArrayList<>();
  for (int i = 0; i < operands.size(); i++) {
    SqlNode operand = operands.get(i);
    if (!(operand instanceof SqlCall)) {
      lastList.add(operand);
      continue;
    }
    final SqlCall call = (SqlCall) operand;
    final RelNode input;
    switch (call.getKind()) {
    case MULTISET_VALUE_CONSTRUCTOR:
    case ARRAY_VALUE_CONSTRUCTOR:
      final SqlNodeList list = new SqlNodeList(call.getOperandList(), call.getParserPosition());
      CollectNamespace nss = (CollectNamespace) validator.getNamespace(call);
      Blackboard usedBb;
      if (null != nss) {
        usedBb = createBlackboard(nss.getScope(), null, false);
      } else {
        usedBb = createBlackboard(
            new ListScope(bb.scope) {
              public SqlNode getNode() {
                return call;
              }
            }, null, false);
      }
      RelDataType multisetType = validator.getValidatedNodeType(call);
      ((SqlValidatorImpl) validator).setValidatedNodeType(list, multisetType.getComponentType());
      input = convertQueryOrInList(usedBb, list, null);
      break;
    case MULTISET_QUERY_CONSTRUCTOR:
    case ARRAY_QUERY_CONSTRUCTOR:
      final RelRoot root = convertQuery(call.operand(0), false, true);
      input = root.rel;
      break;
    default:
      lastList.add(operand);
      continue;
    }
    if (lastList.size() > 0) {
      joinList.add(lastList);
    }
    lastList = new ArrayList<>();
    Collect collect = new Collect(cluster, cluster.traitSetOf(Convention.NONE), input, validator.deriveAlias(call, i));
    joinList.add(collect);
  }
  if (joinList.size() == 0) {
    joinList.add(lastList);
  }
  for (int i = 0; i < joinList.size(); i++) {
    Object o = joinList.get(i);
    if (o instanceof List) {
      @SuppressWarnings("unchecked")
      List<SqlNode> projectList = (List<SqlNode>) o;
      final List<RexNode> selectList = new ArrayList<>();
      final List<String> fieldNameList = new ArrayList<>();
      for (int j = 0; j < projectList.size(); j++) {
        SqlNode operand = projectList.get(j);
        selectList.add(bb.convertExpression(operand));
        // REVIEW angel 5-June-2005: Use deriveAliasFromOrdinal
        // instead of deriveAlias to match field names from
        // SqlRowOperator. Otherwise, get error Type
        // 'RecordType(INTEGER EMPNO)' has no field 'EXPR$0' when
        // doing select * from unnest(select multiset[empno]
        // from sales.emps);
        fieldNameList.add(SqlUtil.deriveAliasFromOrdinal(j));
      }
      relBuilder.push(LogicalValues.createOneRow(cluster))
          .projectNamed(selectList, fieldNameList, true);
      joinList.set(i, relBuilder.build());
    }
  }
  RelNode ret = (RelNode) joinList.get(0);
  for (int i = 1; i < joinList.size(); i++) {
    RelNode relNode = (RelNode) joinList.get(i);
    ret = RelFactories.DEFAULT_JOIN_FACTORY.createJoin(
        ret, relNode, rexBuilder.makeLiteral(true),
        ImmutableSet.<CorrelationId>of(), JoinRelType.INNER, false);
  }
  return ret;
}
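The final loop cross-joins the collected inputs by chaining INNER joins whose condition is the literal TRUE. The same pattern can be reproduced with Calcite's public RelBuilder API; the following sketch (the two single-row Values inputs are made up for illustration) builds a two-input cross join and prints its plan.

import org.apache.calcite.plan.RelOptUtil;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.core.JoinRelType;
import org.apache.calcite.tools.FrameworkConfig;
import org.apache.calcite.tools.Frameworks;
import org.apache.calcite.tools.RelBuilder;

public class CrossJoinSketch {
  public static void main(String[] args) {
    FrameworkConfig config = Frameworks.newConfigBuilder()
        .defaultSchema(Frameworks.createRootSchema(true))
        .build();
    RelBuilder builder = RelBuilder.create(config);

    // Two single-row inputs standing in for the collected multiset inputs.
    RelNode ret = builder
        .values(new String[]{"a"}, 1)
        .values(new String[]{"b"}, 2)
        // An INNER join on a TRUE literal is a cross join, mirroring the
        // RelFactories.DEFAULT_JOIN_FACTORY.createJoin(...) call above.
        .join(JoinRelType.INNER, builder.literal(true))
        .build();

    System.out.println(RelOptUtil.toString(ret));
  }
}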