use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.RelRoot in project druid by druid-io.
the class DruidPlanner method plan.
/**
* Plan an SQL query for execution, returning a {@link PlannerResult} which can be used to actually execute the query.
*
* Ideally, the query can be planned into a native Druid query, using {@link #planWithDruidConvention}, but will
* fall back to {@link #planWithBindableConvention} if this is not possible.
*
* In some future this could perhaps re-use some of the work done by {@link #validate()}
* instead of repeating it, but that day is not today.
*/
public PlannerResult plan() throws SqlParseException, ValidationException, RelConversionException {
  resetPlanner();
  final ParsedNodes parsed = ParsedNodes.create(planner.parse(plannerContext.getSql()));
  try {
    if (parsed.getIngestionGranularity() != null) {
      plannerContext.getQueryContext().put(
          DruidSqlInsert.SQL_INSERT_SEGMENT_GRANULARITY,
          plannerContext.getJsonMapper().writeValueAsString(parsed.getIngestionGranularity())
      );
    }
  } catch (JsonProcessingException e) {
    throw new ValidationException("Unable to serialize partition granularity.");
  }
  // the planner's type factory is not available until after parsing
  this.rexBuilder = new RexBuilder(planner.getTypeFactory());
  final SqlNode parameterizedQueryNode = rewriteDynamicParameters(parsed.getQueryNode());
  final SqlNode validatedQueryNode = planner.validate(parameterizedQueryNode);
  final RelRoot rootQueryRel = planner.rel(validatedQueryNode);
  try {
    return planWithDruidConvention(rootQueryRel, parsed.getExplainNode(), parsed.getInsertNode());
  } catch (Exception e) {
    Throwable cannotPlanException = Throwables.getCauseOfType(e, RelOptPlanner.CannotPlanException.class);
    if (null == cannotPlanException) {
      // Not a CannotPlanException; rethrow without trying the bindable convention
      throw e;
    }
    // Planning failed with CannotPlanException; the query may still be plannable with the bindable convention
    if (parsed.getInsertNode() == null) {
      // Try again with BINDABLE convention. Used for querying Values and metadata tables.
      try {
        return planWithBindableConvention(rootQueryRel, parsed.getExplainNode());
      } catch (Exception e2) {
        e.addSuppressed(e2);
      }
    }
    Logger logger = log;
    if (!QueryContexts.isDebug(plannerContext.getQueryContext())) {
      logger = log.noStackTrace();
    }
    String errorMessage = buildSQLPlanningErrorMessage(cannotPlanException);
    logger.warn(e, errorMessage);
    throw new UnsupportedSQLQueryException(errorMessage);
  }
}
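The parse/validate/rel sequence above is Calcite's standard Planner flow for producing a RelRoot; Druid layers its dynamic-parameter rewriting and convention fallback on top of it. A minimal standalone sketch of that flow, assuming a FrameworkConfig named config has already been built (the query string is illustrative):

Planner planner = Frameworks.getPlanner(config);
SqlNode parsed = planner.parse("SELECT dim, COUNT(*) FROM tbl GROUP BY dim"); // hypothetical query
SqlNode validated = planner.validate(parsed);
RelRoot root = planner.rel(validated); // RelRoot bundles the RelNode with the validated row type, fields, and collation
RelNode logicalPlan = root.rel;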
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.RelRoot in project druid by druid-io.
the class DruidPlanner method possiblyWrapRootWithOuterLimitFromContext.
/**
* This method wraps the root with a {@link LogicalSort} that applies a limit (no ordering change). If the outer rel
* is already a {@link Sort}, we can merge our outerLimit into it, similar to what is going on in
* {@link org.apache.druid.sql.calcite.rule.SortCollapseRule}.
*
* The {@link PlannerContext#CTX_SQL_OUTER_LIMIT} flag that controls this wrapping is meant for internal use only by
* the web console, allowing it to apply a limit to queries without rewriting the original SQL.
*
* @param root root node
* @return root node wrapped with a limiting logical sort if a limit is specified in the query context.
*/
@Nullable
private RelRoot possiblyWrapRootWithOuterLimitFromContext(RelRoot root) {
  Object outerLimitObj = plannerContext.getQueryContext().get(PlannerContext.CTX_SQL_OUTER_LIMIT);
  Long outerLimit = DimensionHandlerUtils.convertObjectToLong(outerLimitObj, true);
  if (outerLimit == null) {
    return root;
  }
  final LogicalSort newRootRel;
  if (root.rel instanceof Sort) {
    Sort sort = (Sort) root.rel;
    final OffsetLimit originalOffsetLimit = OffsetLimit.fromSort(sort);
    final OffsetLimit newOffsetLimit = originalOffsetLimit.andThen(new OffsetLimit(0, outerLimit));
    if (newOffsetLimit.equals(originalOffsetLimit)) {
      // nothing to do, don't bother to make a new sort
      return root;
    }
    newRootRel = LogicalSort.create(
        sort.getInput(),
        sort.collation,
        newOffsetLimit.getOffsetAsRexNode(rexBuilder),
        newOffsetLimit.getLimitAsRexNode(rexBuilder)
    );
  } else {
    newRootRel = LogicalSort.create(
        root.rel,
        root.collation,
        null,
        new OffsetLimit(0, outerLimit).getLimitAsRexNode(rexBuilder)
    );
  }
  return new RelRoot(newRootRel, root.validatedRowType, root.kind, root.fields, root.collation);
}
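For reference, the limit-only wrapping in the else branch amounts to the following plain-Calcite pattern (a sketch, not Druid code; the limit value of 100 and the rexBuilder variable are assumed):

// Wrap an existing RelRoot with a fetch (limit) while preserving its collation.
RexNode fetch = rexBuilder.makeExactLiteral(java.math.BigDecimal.valueOf(100)); // assumed limit of 100
LogicalSort limited = LogicalSort.create(root.rel, root.collation, null /* no offset */, fetch);
RelRoot limitedRoot = root.withRel(limited); // keeps the validated row type, kind, fields and collation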
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.RelRoot in project drill by axbaretto.
the class SqlConverter method toRel.
public RelRoot toRel(final SqlNode validatedNode) {
  if (planner == null) {
    planner = new VolcanoPlanner(costFactory, settings);
    planner.setExecutor(new DrillConstExecutor(functions, util, settings));
    planner.clearRelTraitDefs();
    planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
    planner.addRelTraitDef(DrillDistributionTraitDef.INSTANCE);
    planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
  }
  if (cluster == null) {
    initCluster();
  }
  final SqlToRelConverter sqlToRelConverter = new SqlToRelConverter(
      new Expander(), validator, catalog, cluster, DrillConvertletTable.INSTANCE, sqlToRelConverterConfig);
  /*
   * Set the property to false to avoid simplifying project expressions while creating new projects,
   * since simplification may change the data mode and lead to assertion errors during type validation.
   */
  Hook.REL_BUILDER_SIMPLIFY.add(Hook.property(false));
  // Pass top = false to avoid unexpected column errors.
  final RelRoot rel = sqlToRelConverter.convertQuery(validatedNode, false, false);
  final RelRoot rel2 = rel.withRel(sqlToRelConverter.flattenTypes(rel.rel, true));
  final RelRoot rel3 = rel2.withRel(RelDecorrelator.decorrelateQuery(rel2.rel));
  return rel3;
}
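Callers typically either take the converted relational expression directly from root.rel or let RelRoot apply the final projection, as in this small sketch (the converter and validatedNode variables are illustrative, not Drill code):

RelRoot root = converter.toRel(validatedNode);
RelNode converted = root.rel;       // the flattened, decorrelated relational expression
RelNode projected = root.project(); // adds a projection on top if the root's field list differs from the rel's row type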
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.RelRoot in project spf4j by zolyfarkas.
the class AvroQueryTest method testAvroSql.
@Test
@SuppressFBWarnings("PRMC_POSSIBLY_REDUNDANT_METHOD_CALLS")
@PrintLogs(category = "org.codehaus.janino", ideMinLevel = Level.INFO, greedy = true)
public void testAvroSql() throws SqlParseException, RelConversionException, ValidationException,
    InstantiationException, IllegalAccessException {
  Schema recASchema = SchemaBuilder.record("RecordA").fields()
      .name("id").type().intType().noDefault()
      .requiredString("name")
      .endRecord();
  Schema subRecSchema = SchemaBuilder.record("SubRecord").fields()
      .name("key").type().stringType().noDefault()
      .requiredString("value")
      .endRecord();
  Schema recBSchema = SchemaBuilder.record("RecordB").fields()
      .name("id").type().intType().noDefault()
      .requiredString("name")
      .requiredString("text")
      .name("adate").type(Schemas.dateString()).noDefault()
      .name("meta").type(Schema.createArray(subRecSchema)).noDefault()
      .name("meta2").type(subRecSchema).noDefault()
      .endRecord();
  GenericRecordBuilder rb = new GenericRecordBuilder(recASchema, subRecSchema, recBSchema);
  Class<? extends SpecificRecordBase> raC = rb.getRecordClass(recASchema);
  Class<? extends SpecificRecordBase> rbC = rb.getRecordClass(recBSchema);
  Class<? extends SpecificRecordBase> rsC = rb.getRecordClass(subRecSchema);
  GenericRecord reca1 = raC.newInstance();
  reca1.put("id", 1);
  reca1.put("name", "Jim");
  GenericRecord subRec = rsC.newInstance();
  subRec.put("key", "key1");
  subRec.put("value", "val1");
  GenericRecord recb1 = rbC.newInstance();
  recb1.put("id", 1);
  recb1.put("name", "Beam");
  recb1.put("text", "bla");
  recb1.put("adate", LocalDate.now());
  recb1.put("meta", Collections.singletonList(subRec));
  recb1.put("meta2", subRec);
  GenericRecord recb2 = rbC.newInstance();
  recb2.put("id", 2);
  recb2.put("name", "Xi");
  recb2.put("text", "blabla");
  recb2.put("adate", LocalDate.now());
  recb2.put("meta", Collections.singletonList(subRec));
  recb2.put("meta2", subRec);
  SchemaPlus schema = Frameworks.createRootSchema(true);
  schema.add("a", new AvroIteratorAsProjectableFilterableTable(recASchema,
      () -> CloseableIterator.from(Collections.singletonList(reca1).iterator())));
  schema.add("b", new AvroIteratorAsProjectableFilterableTable(recBSchema,
      () -> CloseableIterator.from(Arrays.asList(recb1, recb2).iterator())));
  SqlParser.Config cfg = SqlParser.configBuilder()
      .setCaseSensitive(true)
      .setIdentifierMaxLength(255)
      .setLex(Lex.JAVA)
      .build();
  FrameworkConfig config = Frameworks.newConfigBuilder().parserConfig(cfg).defaultSchema(schema).build();
  Planner planner = Frameworks.getPlanner(config);
  SqlNode s = planner.parse("select a.id, a.name as n1, b.name as n2,"
      + " b.adate as adate, b.meta as firstKey, b.meta2.key as blaKey"
      + " from a"
      + " inner join b on a.id = b.id where b.text like 'bla%' or b.text like 'cucu%'");
  SqlNode validated = planner.validate(s);
  RelRoot rel = planner.rel(validated);
  RelNode plan = rel.project();
  LOG.debug("exec plan", RelOptUtil.toString(plan));
  plan = PlannerUtils.pushDownPredicatesAndProjection(plan);
  LOG.debug("exec plan optimized", RelOptUtil.toString(plan));
  RelDataType rowType = plan.getRowType();
  LOG.debug("Return row type: {}", rowType);
  Schema from = Types.from(rowType);
  LOG.debug("Return row schema: {}", from);
  Interpreter interpreter = new Interpreter(new EmbededDataContext(new JavaTypeFactoryImpl(), null), plan);
  boolean empty = true;
  for (Object[] row : interpreter) {
    LOG.debug("RawRow {} with schema {}", row, from);
    GenericRecord record = IndexedRecords.fromRecord(from, row);
    LOG.debug("Row", record);
    empty = false;
  }
  Assert.assertFalse(empty);
}
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.RelRoot in project calcite by apache.
the class CalcitePrepareImpl method convert_.
private ParseResult convert_(Context context, String sql, boolean analyze, boolean fail,
    CalciteCatalogReader catalogReader, SqlValidator validator, SqlNode sqlNode1) {
  final JavaTypeFactory typeFactory = context.getTypeFactory();
  final Convention resultConvention =
      enableBindable ? BindableConvention.INSTANCE : EnumerableConvention.INSTANCE;
  // Use the Volcano because it can handle the traits.
  final VolcanoPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  final SqlToRelConverter.Config config = SqlToRelConverter.config().withTrimUnusedFields(true);
  final CalcitePreparingStmt preparingStmt = new CalcitePreparingStmt(
      this, context, catalogReader, typeFactory, context.getRootSchema(), null,
      createCluster(planner, new RexBuilder(typeFactory)), resultConvention, createConvertletTable());
  final SqlToRelConverter converter = preparingStmt.getSqlToRelConverter(validator, catalogReader, config);
  final RelRoot root = converter.convertQuery(sqlNode1, false, true);
  if (analyze) {
    return analyze_(validator, sql, sqlNode1, root, fail);
  }
  return new ConvertResult(this, validator, sql, sqlNode1, validator.getValidatedNodeType(sqlNode1), root);
}
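convert_ stops at the logical RelRoot; downstream, a physical plan in resultConvention is typically obtained by converting the root rel's traits and letting the Volcano planner optimize. A rough sketch of that later step (not part of convert_; it assumes the appropriate conversion rules have already been registered with the planner, and reuses the planner, root, and resultConvention names from above):

RelNode logical = root.rel;
RelTraitSet desired = logical.getTraitSet().replace(resultConvention).simplify();
RelNode withTraits = planner.changeTraits(logical, desired); // register the rel with the requested convention
planner.setRoot(withTraits);
RelNode best = planner.findBestExp(); // cheapest plan satisfying the desired traits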