use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.tools.Planner in project apex-malhar by apache.
the class SQLExecEnvironment method executeSQL.
/**
 * This is the main method; it takes a SQL statement as input and constructs a DAG using the
 * constructs registered with this {@link SQLExecEnvironment}.
 *
 * @param dag DAG that gets populated from the SQL statement.
 * @param sql SQL statement that should be converted to a DAG.
 */
public void executeSQL(DAG dag, String sql) {
  FrameworkConfig config = buildFrameWorkConfig();
  Planner planner = Frameworks.getPlanner(config);
  try {
    logger.info("Parsing SQL statement: {}", sql);
    SqlNode parsedTree = planner.parse(sql);
    SqlNode validatedTree = planner.validate(parsedTree);
    RelNode relationalTree = planner.rel(validatedTree).rel;
    logger.info("RelNode relationalTree generated from SQL statement is:\n {}",
        Util.toLinux(RelOptUtil.toString(relationalTree)));
    RelNodeVisitor visitor = new RelNodeVisitor(dag, typeFactory);
    visitor.traverse(relationalTree);
  } catch (Exception e) {
    throw Throwables.propagate(e);
  } finally {
    planner.close();
  }
}
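The parse → validate → rel sequence above is the standard single-use Planner lifecycle from Calcite's Frameworks API. For reference, a minimal self-contained sketch of that lifecycle; the empty root schema and the error handling are illustrative, not taken from apex-malhar:

import org.apache.calcite.rel.RelNode;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.tools.FrameworkConfig;
import org.apache.calcite.tools.Frameworks;
import org.apache.calcite.tools.Planner;

public class PlannerLifecycleSketch {
  public static RelNode plan(String sql) throws Exception {
    SchemaPlus rootSchema = Frameworks.createRootSchema(true);
    FrameworkConfig config = Frameworks.newConfigBuilder()
        .defaultSchema(rootSchema)
        .build();
    Planner planner = Frameworks.getPlanner(config);
    try {
      SqlNode parsed = planner.parse(sql);          // SQL text -> AST
      SqlNode validated = planner.validate(parsed); // resolve names, infer types
      return planner.rel(validated).rel;            // AST -> relational algebra
    } finally {
      planner.close();                              // a Planner instance is single-use
    }
  }
}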
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.tools.Planner in project storm by apache.
the class TestCompilerUtils method sqlOverNestedTable.
public static CalciteState sqlOverNestedTable(String sql)
    throws RelConversionException, ValidationException, SqlParseException {
  SchemaPlus schema = Frameworks.createRootSchema(true);
  JavaTypeFactory typeFactory = new JavaTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
  StreamableTable streamableTable = new CompilerUtil.TableBuilderInfo(typeFactory)
      .field("ID", SqlTypeName.INTEGER)
      .field("MAPFIELD", // MAP<VARCHAR, INTEGER>, all nullable
          typeFactory.createTypeWithNullability(
              typeFactory.createMapType(
                  typeFactory.createTypeWithNullability(
                      typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                  typeFactory.createTypeWithNullability(
                      typeFactory.createSqlType(SqlTypeName.INTEGER), true)),
              true))
      .field("NESTEDMAPFIELD", // MAP<VARCHAR, MAP<VARCHAR, INTEGER>>, all nullable
          typeFactory.createTypeWithNullability(
              typeFactory.createMapType(
                  typeFactory.createTypeWithNullability(
                      typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                  typeFactory.createTypeWithNullability(
                      typeFactory.createMapType(
                          typeFactory.createTypeWithNullability(
                              typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                          typeFactory.createTypeWithNullability(
                              typeFactory.createSqlType(SqlTypeName.INTEGER), true)),
                      true)),
              true))
      .field("ARRAYFIELD", // INTEGER ARRAY, nullable
          typeFactory.createTypeWithNullability(
              typeFactory.createArrayType(
                  typeFactory.createTypeWithNullability(
                      typeFactory.createSqlType(SqlTypeName.INTEGER), true),
                  -1L),
              true))
      .build();
  Table table = streamableTable.stream();
  schema.add("FOO", table);
  schema.add("BAR", table);
  schema.add("MYPLUS", ScalarFunctionImpl.create(MyPlus.class, "eval"));
  List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
  sqlOperatorTables.add(SqlStdOperatorTable.instance());
  sqlOperatorTables.add(new CalciteCatalogReader(
      CalciteSchema.from(schema), false, Collections.<String>emptyList(), typeFactory));
  SqlOperatorTable chainedSqlOperatorTable = new ChainedSqlOperatorTable(sqlOperatorTables);
  FrameworkConfig config = Frameworks.newConfigBuilder()
      .defaultSchema(schema)
      .operatorTable(chainedSqlOperatorTable)
      .build();
  Planner planner = Frameworks.getPlanner(config);
  SqlNode parse = planner.parse(sql);
  SqlNode validate = planner.validate(parse);
  RelNode tree = planner.convert(validate);
  System.out.println(RelOptUtil.toString(tree, SqlExplainLevel.ALL_ATTRIBUTES));
  return new CalciteState(schema, tree);
}
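ScalarFunctionImpl.create(MyPlus.class, "eval") turns a plain Java method into a Calcite scalar function, and the chained operator table lets the validator resolve both the standard SQL operators and the functions registered in the schema. The MyPlus class itself is not shown in this snippet; a plausible reconstruction (an assumption, not the actual storm test class) would be:

// Hypothetical reconstruction of the UDF behind MYPLUS; Calcite reflects on
// the "eval" method to derive the function's SQL signature.
public class MyPlus {
  public int eval(int left, int right) {
    return left + right;
  }
}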
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.tools.Planner in project streamline by hortonworks.
the class TestCompilerUtils method sqlOverDummyTable.
public static CalciteState sqlOverDummyTable(String sql)
    throws RelConversionException, ValidationException, SqlParseException {
  SchemaPlus schema = Frameworks.createRootSchema(true);
  JavaTypeFactory typeFactory = new JavaTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
  StreamableTable streamableTable = new CompilerUtil.TableBuilderInfo(typeFactory)
      .field("ID", SqlTypeName.INTEGER)
      .field("NAME", typeFactory.createType(String.class))
      .field("ADDR", typeFactory.createType(String.class))
      .build();
  Table table = streamableTable.stream();
  schema.add("FOO", table);
  schema.add("BAR", table);
  schema.add("MYPLUS", ScalarFunctionImpl.create(MyPlus.class, "eval"));
  List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
  sqlOperatorTables.add(SqlStdOperatorTable.instance());
  sqlOperatorTables.add(new CalciteCatalogReader(
      CalciteSchema.from(schema), false, Collections.<String>emptyList(), typeFactory));
  SqlOperatorTable chainedSqlOperatorTable = new ChainedSqlOperatorTable(sqlOperatorTables);
  FrameworkConfig config = Frameworks.newConfigBuilder()
      .defaultSchema(schema)
      .operatorTable(chainedSqlOperatorTable)
      .build();
  Planner planner = Frameworks.getPlanner(config);
  SqlNode parse = planner.parse(sql);
  SqlNode validate = planner.validate(parse);
  RelNode tree = planner.convert(validate);
  System.out.println(RelOptUtil.toString(tree, SqlExplainLevel.ALL_ATTRIBUTES));
  return new CalciteState(schema, tree);
}
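A typical call site would hand this helper a query against the registered tables and UDF; the query text and the CalciteState accessor below are illustrative assumptions, not code from the streamline tests:

// Illustrative usage: FOO and MYPLUS were registered by sqlOverDummyTable.
CalciteState state = TestCompilerUtils.sqlOverDummyTable(
    "SELECT ID, MYPLUS(ID, 1) FROM FOO WHERE ID > 2");
RelNode tree = state.tree(); // assumes CalciteState exposes its RelNode this way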
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.tools.Planner in project streamline by hortonworks.
the class StreamlineSqlImpl method execute.
@Override
public void execute(Iterable<String> statements, ChannelHandler result) throws Exception {
  Map<String, DataSource> dataSources = new HashMap<>();
  for (String sql : statements) {
    StreamlineParser parser = new StreamlineParser(sql);
    SqlNode node = parser.impl().parseSqlStmtEof();
    if (node instanceof SqlCreateTable) {
      handleCreateTable((SqlCreateTable) node, dataSources);
    } else if (node instanceof SqlCreateFunction) {
      handleCreateFunction((SqlCreateFunction) node);
    } else {
      // Anything other than DDL goes through the Calcite planner and is
      // compiled into an executable processor.
      FrameworkConfig config = buildFrameWorkConfig();
      Planner planner = Frameworks.getPlanner(config);
      SqlNode parse = planner.parse(sql);
      SqlNode validate = planner.validate(parse);
      RelNode tree = planner.convert(validate);
      PlanCompiler compiler = new PlanCompiler(typeFactory);
      AbstractValuesProcessor proc = compiler.compile(tree);
      proc.initialize(dataSources, result);
    }
  }
}
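Because each statement is dispatched on its parsed node type, one script can mix DDL and queries. An illustrative input sequence follows; the exact DDL grammar accepted by StreamlineParser is an assumption here, loosely modeled on storm-sql's CREATE EXTERNAL TABLE syntax:

// Hypothetical statement list for execute(); each entry is parsed and
// dispatched independently: DDL updates local state, queries get compiled.
List<String> statements = Arrays.asList(
    "CREATE EXTERNAL TABLE ORDERS (ID INT, AMOUNT DOUBLE) LOCATION 'kafka://orders'",
    "CREATE FUNCTION MYPLUS AS 'com.example.MyPlus'",
    "SELECT ID, MYPLUS(ID, 1) FROM ORDERS WHERE AMOUNT > 10");
sql.execute(statements, resultHandler); // sql is a StreamlineSqlImpl instance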
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.tools.Planner in project beam by apache.
the class BeamJoinRel method getBoundednessOfRelNode.
/**
 * This method returns the boundedness of a RelNode. It is used during planning and applying
 * {@link org.apache.beam.sdk.extensions.sql.impl.rule.BeamCoGBKJoinRule} and
 * {@link org.apache.beam.sdk.extensions.sql.impl.rule.BeamSideInputJoinRule}.
 *
 * <p>The Volcano planner works in a top-down fashion: it starts by transforming the root and
 * moves towards the leaves of the plan. Because of this, when a logical join is transformed its
 * inputs are still in the logical convention, so this method recursively visits the inputs of
 * the RelNode until a BeamIOSourceRel is encountered and propagates the boundedness upwards.
 *
 * <p>The boundedness of each child of a RelNode is collected in a list. If any child is
 * unbounded, the RelNode is unbounded; otherwise it is bounded.
 *
 * @param relNode the RelNode whose boundedness has to be determined
 * @return {@code PCollection.IsBounded}
 */
public static PCollection.IsBounded getBoundednessOfRelNode(RelNode relNode) {
  if (relNode instanceof BeamRelNode) {
    return ((BeamRelNode) relNode).isBounded();
  }
  List<PCollection.IsBounded> boundednessOfInputs = new ArrayList<>();
  for (RelNode inputRel : relNode.getInputs()) {
    if (inputRel instanceof RelSubset) {
      // Consider the RelNode with the best cost in the RelSubset. If the best-cost
      // RelNode cannot be determined, consider the first RelNode in the RelSubset.
      RelNode rel = ((RelSubset) inputRel).getBest();
      if (rel == null) {
        rel = ((RelSubset) inputRel).getRelList().get(0);
      }
      boundednessOfInputs.add(getBoundednessOfRelNode(rel));
    } else {
      boundednessOfInputs.add(getBoundednessOfRelNode(inputRel));
    }
  }
  // If any input is unbounded, the result is unbounded.
  return boundednessOfInputs.contains(PCollection.IsBounded.UNBOUNDED)
      ? PCollection.IsBounded.UNBOUNDED
      : PCollection.IsBounded.BOUNDED;
}
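The propagation rule amounts to a fold over the children in which unbounded wins. A minimal standalone sketch of that rule, using a hypothetical Node type rather than Beam's RelNode machinery:

import java.util.List;

// Simplified, self-contained illustration of the "any unbounded child makes the
// parent unbounded" rule; Node and its members are hypothetical.
final class Node {
  final boolean unboundedSource; // set on leaf/source nodes
  final List<Node> inputs;

  Node(boolean unboundedSource, List<Node> inputs) {
    this.unboundedSource = unboundedSource;
    this.inputs = inputs;
  }
}

final class Boundedness {
  static boolean isUnbounded(Node node) {
    if (node.inputs.isEmpty()) {
      return node.unboundedSource; // leaves carry the ground truth
    }
    // Recurse: one unbounded input is enough to make this node unbounded.
    return node.inputs.stream().anyMatch(Boundedness::isUnbounded);
  }
}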