Use of org.apache.calcite.tools.FrameworkConfig in project storm by apache: class TestCompilerUtils, method sqlOverNestedTable.
public static CalciteState sqlOverNestedTable(String sql)
        throws RelConversionException, ValidationException, SqlParseException {
    SchemaPlus schema = Frameworks.createRootSchema(true);
    JavaTypeFactory typeFactory = new JavaTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
    // Streaming table with a scalar column plus map, nested-map and array columns.
    StreamableTable streamableTable = new CompilerUtil.TableBuilderInfo(typeFactory)
            .field("ID", SqlTypeName.INTEGER)
            .field("MAPFIELD", typeFactory.createTypeWithNullability(
                    typeFactory.createMapType(
                            typeFactory.createTypeWithNullability(typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                            typeFactory.createTypeWithNullability(typeFactory.createSqlType(SqlTypeName.INTEGER), true)),
                    true))
            .field("NESTEDMAPFIELD", typeFactory.createTypeWithNullability(
                    typeFactory.createMapType(
                            typeFactory.createTypeWithNullability(typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                            typeFactory.createTypeWithNullability(
                                    typeFactory.createMapType(
                                            typeFactory.createTypeWithNullability(typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                                            typeFactory.createTypeWithNullability(typeFactory.createSqlType(SqlTypeName.INTEGER), true)),
                                    true)),
                    true))
            .field("ARRAYFIELD", typeFactory.createTypeWithNullability(
                    typeFactory.createArrayType(
                            typeFactory.createTypeWithNullability(typeFactory.createSqlType(SqlTypeName.INTEGER), true), -1L),
                    true))
            .build();
    Table table = streamableTable.stream();
    // Register the same table under two names, plus a scalar UDF.
    schema.add("FOO", table);
    schema.add("BAR", table);
    schema.add("MYPLUS", ScalarFunctionImpl.create(MyPlus.class, "eval"));
    // Operator table: standard SQL operators chained with the functions registered in the schema.
    List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
    sqlOperatorTables.add(SqlStdOperatorTable.instance());
    sqlOperatorTables.add(new CalciteCatalogReader(
            CalciteSchema.from(schema), false, Collections.<String>emptyList(), typeFactory));
    SqlOperatorTable chainedSqlOperatorTable = new ChainedSqlOperatorTable(sqlOperatorTables);
    FrameworkConfig config = Frameworks.newConfigBuilder()
            .defaultSchema(schema)
            .operatorTable(chainedSqlOperatorTable)
            .build();
    Planner planner = Frameworks.getPlanner(config);
    // Parse -> validate -> convert the SQL text into a relational expression tree.
    SqlNode parse = planner.parse(sql);
    SqlNode validate = planner.validate(parse);
    RelNode tree = planner.convert(validate);
    System.out.println(RelOptUtil.toString(tree, SqlExplainLevel.ALL_ATTRIBUTES));
    return new CalciteState(schema, tree);
}
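For context, the FrameworkConfig built above drives the standard Calcite planner lifecycle that the Storm method exercises. The following standalone sketch is not part of the Storm code: it runs the same parse, validate, convert sequence against an empty root schema, and it assumes a Calcite release in which Planner.convert is still present (newer releases replace it with Planner.rel).

    import org.apache.calcite.plan.RelOptUtil;
    import org.apache.calcite.rel.RelNode;
    import org.apache.calcite.schema.SchemaPlus;
    import org.apache.calcite.sql.SqlNode;
    import org.apache.calcite.sql.parser.SqlParseException;
    import org.apache.calcite.tools.FrameworkConfig;
    import org.apache.calcite.tools.Frameworks;
    import org.apache.calcite.tools.Planner;
    import org.apache.calcite.tools.RelConversionException;
    import org.apache.calcite.tools.ValidationException;

    public class FrameworkConfigSketch {
        public static void main(String[] args)
                throws SqlParseException, ValidationException, RelConversionException {
            // Empty root schema; a VALUES query keeps the example self-contained (no tables needed).
            SchemaPlus rootSchema = Frameworks.createRootSchema(true);
            FrameworkConfig config = Frameworks.newConfigBuilder()
                    .defaultSchema(rootSchema)
                    .build();
            Planner planner = Frameworks.getPlanner(config);

            // Same lifecycle as in sqlOverNestedTable above: parse -> validate -> convert.
            SqlNode parsed = planner.parse("VALUES 1 + 2");
            SqlNode validated = planner.validate(parsed);
            RelNode rel = planner.convert(validated);  // planner.rel(validated).rel on newer Calcite
            System.out.println(RelOptUtil.toString(rel));
        }
    }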
Use of org.apache.calcite.tools.FrameworkConfig in project druid by druid-io: class PlannerFactory, method createPlanner.
public DruidPlanner createPlanner(final Map<String, Object> queryContext) {
    final PlannerContext plannerContext = PlannerContext.create(plannerConfig, queryContext);
    final QueryMaker queryMaker = new QueryMaker(walker, plannerContext);
    final FrameworkConfig frameworkConfig = Frameworks.newConfigBuilder()
            // Identifiers are case sensitive, keep their original casing, and use double-quote quoting.
            .parserConfig(SqlParser.configBuilder()
                    .setCaseSensitive(true)
                    .setUnquotedCasing(Casing.UNCHANGED)
                    .setQuotedCasing(Casing.UNCHANGED)
                    .setQuoting(Quoting.DOUBLE_QUOTE)
                    .build())
            .defaultSchema(rootSchema)
            .traitDefs(ConventionTraitDef.INSTANCE, RelCollationTraitDef.INSTANCE)
            .convertletTable(new DruidConvertletTable(plannerContext))
            .operatorTable(operatorTable)
            .programs(Rules.programs(queryMaker, operatorTable))
            .executor(new RexExecutorImpl(Schemas.createDataContext(null)))
            .context(Contexts.EMPTY_CONTEXT)
            .typeSystem(RelDataTypeSystem.DEFAULT)
            // The later defaultSchema(...) call wins, so planning starts in the Druid sub-schema.
            .defaultSchema(rootSchema.getSubSchema(DruidSchema.NAME))
            .build();
    return new DruidPlanner(Frameworks.getPlanner(frameworkConfig), plannerContext);
}
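The parserConfig passed to the FrameworkConfig above controls how identifiers are treated before any planning happens. The minimal sketch below is not Druid code; it only exercises the Calcite parser API with the same casing and quoting settings to show their effect on a query string.

    import org.apache.calcite.avatica.util.Casing;
    import org.apache.calcite.avatica.util.Quoting;
    import org.apache.calcite.sql.SqlNode;
    import org.apache.calcite.sql.parser.SqlParseException;
    import org.apache.calcite.sql.parser.SqlParser;

    public class ParserConfigSketch {
        public static void main(String[] args) throws SqlParseException {
            // Same settings Druid passes to its FrameworkConfig: identifiers keep their
            // original case, and double quotes delimit quoted identifiers.
            SqlParser.Config parserConfig = SqlParser.configBuilder()
                    .setCaseSensitive(true)
                    .setUnquotedCasing(Casing.UNCHANGED)
                    .setQuotedCasing(Casing.UNCHANGED)
                    .setQuoting(Quoting.DOUBLE_QUOTE)
                    .build();

            SqlParser parser = SqlParser.create("SELECT \"colName\" FROM \"dataSource\"", parserConfig);
            SqlNode ast = parser.parseQuery();
            System.out.println(ast);  // identifiers are preserved exactly as written
        }
    }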