Example 11 with JavaTypeFactoryImpl

Use of org.apache.calcite.jdbc.JavaTypeFactoryImpl in project hive by apache.

Class HiveMaterializedViewsRegistry, method createTableScan:

private static RelNode createTableScan(Table viewTable) {
    // 0. Recreate cluster
    final RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(null);
    final RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
    final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
    // 1. Create column schema
    final RowResolver rr = new RowResolver();
    // 1.1 Add column info for non-partition cols (ObjectInspector fields)
    StructObjectInspector rowObjectInspector;
    try {
        rowObjectInspector = (StructObjectInspector) viewTable.getDeserializer().getObjectInspector();
    } catch (SerDeException e) {
        // Bail out
        return null;
    }
    List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
    ColumnInfo colInfo;
    String colName;
    ArrayList<ColumnInfo> cInfoLst = new ArrayList<ColumnInfo>();
    for (int i = 0; i < fields.size(); i++) {
        colName = fields.get(i).getFieldName();
        colInfo = new ColumnInfo(fields.get(i).getFieldName(), TypeInfoUtils.getTypeInfoFromObjectInspector(fields.get(i).getFieldObjectInspector()), null, false);
        rr.put(null, colName, colInfo);
        cInfoLst.add(colInfo);
    }
    ArrayList<ColumnInfo> nonPartitionColumns = new ArrayList<ColumnInfo>(cInfoLst);
    // 1.2 Add column info corresponding to partition columns
    ArrayList<ColumnInfo> partitionColumns = new ArrayList<ColumnInfo>();
    for (FieldSchema part_col : viewTable.getPartCols()) {
        colName = part_col.getName();
        colInfo = new ColumnInfo(colName, TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), null, true);
        rr.put(null, colName, colInfo);
        cInfoLst.add(colInfo);
        partitionColumns.add(colInfo);
    }
    // 1.3 Build row type from field <type, name>
    RelDataType rowType;
    try {
        rowType = TypeConverter.getType(cluster, rr, null);
    } catch (CalciteSemanticException e) {
        // Bail out
        return null;
    }
    // 2. Build RelOptAbstractTable
    String fullyQualifiedTabName = viewTable.getDbName();
    if (fullyQualifiedTabName != null && !fullyQualifiedTabName.isEmpty()) {
        fullyQualifiedTabName = fullyQualifiedTabName + "." + viewTable.getTableName();
    } else {
        fullyQualifiedTabName = viewTable.getTableName();
    }
    RelOptHiveTable optTable = new RelOptHiveTable(null, fullyQualifiedTabName, rowType, viewTable, nonPartitionColumns, partitionColumns, new ArrayList<VirtualColumn>(), SessionState.get().getConf(), new HashMap<String, PrunedPartitionList>(), new AtomicInteger());
    RelNode tableRel;
    // 3. Build operator
    if (obtainTableType(viewTable) == TableType.DRUID) {
        // Build Druid query
        String address = HiveConf.getVar(SessionState.get().getConf(), HiveConf.ConfVars.HIVE_DRUID_BROKER_DEFAULT_ADDRESS);
        String dataSource = viewTable.getParameters().get(Constants.DRUID_DATA_SOURCE);
        Set<String> metrics = new HashSet<>();
        List<RelDataType> druidColTypes = new ArrayList<>();
        List<String> druidColNames = new ArrayList<>();
        for (RelDataTypeField field : rowType.getFieldList()) {
            druidColTypes.add(field.getType());
            druidColNames.add(field.getName());
            if (field.getName().equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
                // timestamp
                continue;
            }
            if (field.getType().getSqlTypeName() == SqlTypeName.VARCHAR) {
                // dimension
                continue;
            }
            metrics.add(field.getName());
        }
        List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
        DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false), dataSource, RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals);
        final TableScan scan = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, viewTable.getTableName(), null, false, false);
        tableRel = DruidQuery.create(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, druidTable, ImmutableList.<RelNode>of(scan));
    } else {
        // Build Hive Table Scan Rel
        tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, viewTable.getTableName(), null, false, false);
    }
    return tableRel;
}
Also used: RelOptCluster (org.apache.calcite.plan.RelOptCluster), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), ArrayList (java.util.ArrayList), ColumnInfo (org.apache.hadoop.hive.ql.exec.ColumnInfo), DruidTable (org.apache.calcite.adapter.druid.DruidTable), RelDataType (org.apache.calcite.rel.type.RelDataType), RowResolver (org.apache.hadoop.hive.ql.parse.RowResolver), RelOptPlanner (org.apache.calcite.plan.RelOptPlanner), PrunedPartitionList (org.apache.hadoop.hive.ql.parse.PrunedPartitionList), JavaTypeFactoryImpl (org.apache.calcite.jdbc.JavaTypeFactoryImpl), RexBuilder (org.apache.calcite.rex.RexBuilder), CalciteSemanticException (org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException), SerDeException (org.apache.hadoop.hive.serde2.SerDeException), HashSet (java.util.HashSet), HiveTableScan (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan), TableScan (org.apache.calcite.rel.core.TableScan), DruidSchema (org.apache.calcite.adapter.druid.DruidSchema), RelDataTypeField (org.apache.calcite.rel.type.RelDataTypeField), RelOptHiveTable (org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable), HiveRelNode (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode), RelNode (org.apache.calcite.rel.RelNode), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), StructObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector), Interval (org.joda.time.Interval)
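
A note on the pattern: everything JavaTypeFactoryImpl does in this method happens in step 0, where it backs the RexBuilder that the new cluster uses for all row-expression types. A minimal standalone sketch of that bootstrap, assuming plain Calcite only (HepPlanner stands in for Hive's HiveVolcanoPlanner, and ClusterBootstrapSketch is an invented name):

import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.plan.RelOptPlanner;
import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rex.RexBuilder;

public class ClusterBootstrapSketch {
    public static void main(String[] args) {
        // Any RelOptPlanner will do for building a cluster; an empty
        // HepProgram avoids Hive's session-scoped Volcano planner.
        RelOptPlanner planner = new HepPlanner(new HepProgramBuilder().build());
        // JavaTypeFactoryImpl supplies the type system behind every RexNode.
        RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
        RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
        System.out.println(cluster.getTypeFactory().getClass().getSimpleName());
    }
}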

Example 12 with JavaTypeFactoryImpl

Use of org.apache.calcite.jdbc.JavaTypeFactoryImpl in project storm by apache.

Class TestCompilerUtils, method sqlOverNestedTable:

public static CalciteState sqlOverNestedTable(String sql) throws RelConversionException, ValidationException, SqlParseException {
    SchemaPlus schema = Frameworks.createRootSchema(true);
    JavaTypeFactory typeFactory = new JavaTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
    StreamableTable streamableTable = new CompilerUtil.TableBuilderInfo(typeFactory)
            .field("ID", SqlTypeName.INTEGER)
            .field("MAPFIELD",
                    typeFactory.createTypeWithNullability(
                            typeFactory.createMapType(
                                    typeFactory.createTypeWithNullability(typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                                    typeFactory.createTypeWithNullability(typeFactory.createSqlType(SqlTypeName.INTEGER), true)),
                            true))
            .field("NESTEDMAPFIELD",
                    typeFactory.createTypeWithNullability(
                            typeFactory.createMapType(
                                    typeFactory.createTypeWithNullability(typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                                    typeFactory.createTypeWithNullability(
                                            typeFactory.createMapType(
                                                    typeFactory.createTypeWithNullability(typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                                                    typeFactory.createTypeWithNullability(typeFactory.createSqlType(SqlTypeName.INTEGER), true)),
                                            true)),
                            true))
            .field("ARRAYFIELD",
                    typeFactory.createTypeWithNullability(
                            typeFactory.createArrayType(
                                    typeFactory.createTypeWithNullability(typeFactory.createSqlType(SqlTypeName.INTEGER), true), -1L),
                            true))
            .build();
    Table table = streamableTable.stream();
    schema.add("FOO", table);
    schema.add("BAR", table);
    schema.add("MYPLUS", ScalarFunctionImpl.create(MyPlus.class, "eval"));
    List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
    sqlOperatorTables.add(SqlStdOperatorTable.instance());
    sqlOperatorTables.add(new CalciteCatalogReader(CalciteSchema.from(schema), false, Collections.<String>emptyList(), typeFactory));
    SqlOperatorTable chainedSqlOperatorTable = new ChainedSqlOperatorTable(sqlOperatorTables);
    FrameworkConfig config = Frameworks.newConfigBuilder().defaultSchema(schema).operatorTable(chainedSqlOperatorTable).build();
    Planner planner = Frameworks.getPlanner(config);
    SqlNode parse = planner.parse(sql);
    SqlNode validate = planner.validate(parse);
    RelNode tree = planner.convert(validate);
    System.out.println(RelOptUtil.toString(tree, SqlExplainLevel.ALL_ATTRIBUTES));
    return new CalciteState(schema, tree);
}
Also used: CompilerUtil (org.apache.storm.sql.compiler.CompilerUtil), SqlOperatorTable (org.apache.calcite.sql.SqlOperatorTable), Table (org.apache.calcite.schema.Table), ChainedSqlOperatorTable (org.apache.calcite.sql.util.ChainedSqlOperatorTable), SqlStdOperatorTable (org.apache.calcite.sql.fun.SqlStdOperatorTable), StreamableTable (org.apache.calcite.schema.StreamableTable), SchemaPlus (org.apache.calcite.schema.SchemaPlus), ArrayList (java.util.ArrayList), CalciteCatalogReader (org.apache.calcite.prepare.CalciteCatalogReader), RelNode (org.apache.calcite.rel.RelNode), JavaTypeFactoryImpl (org.apache.calcite.jdbc.JavaTypeFactoryImpl), JavaTypeFactory (org.apache.calcite.adapter.java.JavaTypeFactory), Planner (org.apache.calcite.tools.Planner), FrameworkConfig (org.apache.calcite.tools.FrameworkConfig), SqlNode (org.apache.calcite.sql.SqlNode)
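
The single statement building streamableTable is mostly nested type construction. A minimal sketch of the same MAPFIELD and ARRAYFIELD types built in isolation, assuming only standard Calcite calls (NestedTypeSketch is an invented name; no Storm classes are involved):

import org.apache.calcite.adapter.java.JavaTypeFactory;
import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeSystem;
import org.apache.calcite.sql.type.SqlTypeName;

public class NestedTypeSketch {
    public static void main(String[] args) {
        JavaTypeFactory typeFactory = new JavaTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
        // Nullable VARCHAR and INTEGER building blocks.
        RelDataType varchar = typeFactory.createTypeWithNullability(
                typeFactory.createSqlType(SqlTypeName.VARCHAR), true);
        RelDataType integer = typeFactory.createTypeWithNullability(
                typeFactory.createSqlType(SqlTypeName.INTEGER), true);
        // MAP<VARCHAR, INTEGER>, itself nullable: the MAPFIELD type above.
        RelDataType mapType = typeFactory.createTypeWithNullability(
                typeFactory.createMapType(varchar, integer), true);
        // INTEGER ARRAY with unbounded cardinality (-1): the ARRAYFIELD type above.
        RelDataType arrayType = typeFactory.createTypeWithNullability(
                typeFactory.createArrayType(integer, -1L), true);
        System.out.println(mapType.getFullTypeString());
        System.out.println(arrayType.getFullTypeString());
    }
}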

Example 13 with JavaTypeFactoryImpl

Use of org.apache.calcite.jdbc.JavaTypeFactoryImpl in project druid by druid-io.

Class DruidSchemaTest, method testGetTableMap:

@Test
public void testGetTableMap() {
    Assert.assertEquals(ImmutableSet.of("foo", "foo2"), schema.getTableNames());
    final Map<String, Table> tableMap = schema.getTableMap();
    Assert.assertEquals(ImmutableSet.of("foo", "foo2"), tableMap.keySet());
    final DruidTable fooTable = (DruidTable) tableMap.get("foo");
    final RelDataType rowType = fooTable.getRowType(new JavaTypeFactoryImpl());
    final List<RelDataTypeField> fields = rowType.getFieldList();
    Assert.assertEquals(6, fields.size());
    Assert.assertEquals("__time", fields.get(0).getName());
    Assert.assertEquals(SqlTypeName.TIMESTAMP, fields.get(0).getType().getSqlTypeName());
    Assert.assertEquals("cnt", fields.get(1).getName());
    Assert.assertEquals(SqlTypeName.BIGINT, fields.get(1).getType().getSqlTypeName());
    Assert.assertEquals("dim1", fields.get(2).getName());
    Assert.assertEquals(SqlTypeName.VARCHAR, fields.get(2).getType().getSqlTypeName());
    Assert.assertEquals("m1", fields.get(3).getName());
    Assert.assertEquals(SqlTypeName.BIGINT, fields.get(3).getType().getSqlTypeName());
    Assert.assertEquals("unique_dim1", fields.get(4).getName());
    Assert.assertEquals(SqlTypeName.OTHER, fields.get(4).getType().getSqlTypeName());
    Assert.assertEquals("dim2", fields.get(5).getName());
    Assert.assertEquals(SqlTypeName.VARCHAR, fields.get(5).getType().getSqlTypeName());
}
Also used: RelDataTypeField (org.apache.calcite.rel.type.RelDataTypeField), DruidTable (io.druid.sql.calcite.table.DruidTable), Table (org.apache.calcite.schema.Table), JavaTypeFactoryImpl (org.apache.calcite.jdbc.JavaTypeFactoryImpl), RelDataType (org.apache.calcite.rel.type.RelDataType), Test (org.junit.Test)
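
Since getRowType only needs a RelDataTypeFactory, the field-inspection half of this test works against any row type. A minimal sketch with a hand-built row type, assuming only Calcite (RowTypeInspectionSketch is an invented name; the field names merely echo the test and involve no Druid classes):

import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rel.type.RelDataTypeField;
import org.apache.calcite.sql.type.SqlTypeName;

public class RowTypeInspectionSketch {
    public static void main(String[] args) {
        RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl();
        RelDataType rowType = typeFactory.builder()
                .add("__time", SqlTypeName.TIMESTAMP)
                .add("cnt", SqlTypeName.BIGINT)
                .add("dim1", SqlTypeName.VARCHAR)
                .build();
        // Walk the fields exactly as the assertions above do.
        for (RelDataTypeField field : rowType.getFieldList()) {
            System.out.println(field.getName() + " -> " + field.getType().getSqlTypeName());
        }
    }
}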

Example 14 with JavaTypeFactoryImpl

Use of org.apache.calcite.jdbc.JavaTypeFactoryImpl in project hive by apache.

Class HiveMaterializedViewsRegistry, method createMaterializedViewScan:

private static RelNode createMaterializedViewScan(HiveConf conf, Table viewTable) {
    // 0. Recreate cluster
    final RelOptPlanner planner = CalcitePlanner.createPlanner(conf);
    final RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl(new HiveTypeSystemImpl()));
    final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
    // 1. Create column schema
    final RowResolver rr = new RowResolver();
    // 1.1 Add column info for non-partition cols (ObjectInspector fields)
    StructObjectInspector rowObjectInspector;
    try {
        rowObjectInspector = (StructObjectInspector) viewTable.getDeserializer().getObjectInspector();
    } catch (SerDeException e) {
        // Bail out
        return null;
    }
    List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
    ColumnInfo colInfo;
    String colName;
    ArrayList<ColumnInfo> cInfoLst = new ArrayList<ColumnInfo>();
    for (int i = 0; i < fields.size(); i++) {
        colName = fields.get(i).getFieldName();
        colInfo = new ColumnInfo(fields.get(i).getFieldName(), TypeInfoUtils.getTypeInfoFromObjectInspector(fields.get(i).getFieldObjectInspector()), null, false);
        rr.put(null, colName, colInfo);
        cInfoLst.add(colInfo);
    }
    ArrayList<ColumnInfo> nonPartitionColumns = new ArrayList<ColumnInfo>(cInfoLst);
    // 1.2 Add column info corresponding to partition columns
    ArrayList<ColumnInfo> partitionColumns = new ArrayList<ColumnInfo>();
    for (FieldSchema part_col : viewTable.getPartCols()) {
        colName = part_col.getName();
        colInfo = new ColumnInfo(colName, TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), null, true);
        rr.put(null, colName, colInfo);
        cInfoLst.add(colInfo);
        partitionColumns.add(colInfo);
    }
    // 1.3 Build row type from field <type, name>
    RelDataType rowType;
    try {
        rowType = TypeConverter.getType(cluster, rr, null);
    } catch (CalciteSemanticException e) {
        // Bail out
        return null;
    }
    // 2. Build RelOptAbstractTable
    String fullyQualifiedTabName = viewTable.getDbName();
    if (fullyQualifiedTabName != null && !fullyQualifiedTabName.isEmpty()) {
        fullyQualifiedTabName = fullyQualifiedTabName + "." + viewTable.getTableName();
    } else {
        fullyQualifiedTabName = viewTable.getTableName();
    }
    RelNode tableRel;
    // 3. Build operator
    if (obtainTableType(viewTable) == TableType.DRUID) {
        // Build Druid query
        String address = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DRUID_BROKER_DEFAULT_ADDRESS);
        String dataSource = viewTable.getParameters().get(Constants.DRUID_DATA_SOURCE);
        Set<String> metrics = new HashSet<>();
        List<RelDataType> druidColTypes = new ArrayList<>();
        List<String> druidColNames = new ArrayList<>();
        // @NOTE this code is very similar to the code at org/apache/hadoop/hive/ql/parse/CalcitePlanner.java:2362
        // @TODO it would be nice to refactor it
        RelDataTypeFactory dtFactory = cluster.getRexBuilder().getTypeFactory();
        for (RelDataTypeField field : rowType.getFieldList()) {
            if (DruidTable.DEFAULT_TIMESTAMP_COLUMN.equals(field.getName())) {
                // Druid's time column is always not null.
                druidColTypes.add(dtFactory.createTypeWithNullability(field.getType(), false));
            } else {
                druidColTypes.add(field.getType());
            }
            druidColNames.add(field.getName());
            if (field.getName().equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
                // timestamp
                continue;
            }
            if (field.getType().getSqlTypeName() == SqlTypeName.VARCHAR) {
                // dimension
                continue;
            }
            metrics.add(field.getName());
        }
        List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
        rowType = dtFactory.createStructType(druidColTypes, druidColNames);
        RelOptHiveTable optTable = new RelOptHiveTable(null, fullyQualifiedTabName, rowType, viewTable, nonPartitionColumns, partitionColumns, new ArrayList<>(), conf, new HashMap<>(), new HashMap<>(), new AtomicInteger());
        DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false), dataSource, RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals, null, null);
        final TableScan scan = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, viewTable.getTableName(), null, false, false);
        tableRel = DruidQuery.create(cluster, cluster.traitSetOf(BindableConvention.INSTANCE), optTable, druidTable, ImmutableList.<RelNode>of(scan), ImmutableMap.of());
    } else {
        // Build Hive Table Scan Rel
        RelOptHiveTable optTable = new RelOptHiveTable(null, fullyQualifiedTabName, rowType, viewTable, nonPartitionColumns, partitionColumns, new ArrayList<>(), conf, new HashMap<>(), new HashMap<>(), new AtomicInteger());
        tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, viewTable.getTableName(), null, false, false);
    }
    return tableRel;
}
Also used: RelOptCluster (org.apache.calcite.plan.RelOptCluster), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), ArrayList (java.util.ArrayList), ColumnInfo (org.apache.hadoop.hive.ql.exec.ColumnInfo), DruidTable (org.apache.calcite.adapter.druid.DruidTable), RelDataType (org.apache.calcite.rel.type.RelDataType), RowResolver (org.apache.hadoop.hive.ql.parse.RowResolver), RelOptPlanner (org.apache.calcite.plan.RelOptPlanner), JavaTypeFactoryImpl (org.apache.calcite.jdbc.JavaTypeFactoryImpl), RelDataTypeFactory (org.apache.calcite.rel.type.RelDataTypeFactory), RexBuilder (org.apache.calcite.rex.RexBuilder), CalciteSemanticException (org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException), SerDeException (org.apache.hadoop.hive.serde2.SerDeException), HashSet (java.util.HashSet), HiveTableScan (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan), TableScan (org.apache.calcite.rel.core.TableScan), DruidSchema (org.apache.calcite.adapter.druid.DruidSchema), RelDataTypeField (org.apache.calcite.rel.type.RelDataTypeField), RelOptHiveTable (org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable), HiveRelNode (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode), RelNode (org.apache.calcite.rel.RelNode), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), HiveTypeSystemImpl (org.apache.hadoop.hive.ql.optimizer.calcite.HiveTypeSystemImpl), StructObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector), Interval (org.joda.time.Interval)
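
The JavaTypeFactoryImpl difference from Example 11 is the constructor argument: a RelDataTypeSystem (here Hive's HiveTypeSystemImpl) that overrides Calcite's default precision and scale limits. A minimal sketch of plugging a custom type system into the factory (WideVarcharTypeSystem is an invented example, not Hive's implementation):

import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.rel.type.RelDataTypeSystemImpl;
import org.apache.calcite.sql.type.SqlTypeName;

public class CustomTypeSystemSketch {
    // Illustrative type system: identical to Calcite's defaults except for
    // a wider maximum VARCHAR precision.
    static class WideVarcharTypeSystem extends RelDataTypeSystemImpl {
        @Override
        public int getMaxPrecision(SqlTypeName typeName) {
            return typeName == SqlTypeName.VARCHAR ? 65536 : super.getMaxPrecision(typeName);
        }
    }

    public static void main(String[] args) {
        JavaTypeFactoryImpl typeFactory = new JavaTypeFactoryImpl(new WideVarcharTypeSystem());
        System.out.println(typeFactory.getTypeSystem().getMaxPrecision(SqlTypeName.VARCHAR));
    }
}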

Example 15 with JavaTypeFactoryImpl

Use of org.apache.calcite.jdbc.JavaTypeFactoryImpl in project hive by apache.

Class TestCBORuleFiredOnlyOnce, method testRuleFiredOnlyOnce:

@Test
public void testRuleFiredOnlyOnce() {
    HiveConf conf = new HiveConf();
    // Create HepPlanner
    HepProgramBuilder programBuilder = new HepProgramBuilder();
    programBuilder.addMatchOrder(HepMatchOrder.TOP_DOWN);
    programBuilder = programBuilder.addRuleCollection(ImmutableList.<RelOptRule>of(DummyRule.INSTANCE));
    // Create rules registry to not trigger a rule more than once
    HiveRulesRegistry registry = new HiveRulesRegistry();
    HivePlannerContext context = new HivePlannerContext(null, registry, null, null, null, null);
    HepPlanner planner = new HepPlanner(programBuilder.build(), context);
    // Cluster
    RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
    RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
    // Create MD provider
    HiveDefaultRelMetadataProvider mdProvider = new HiveDefaultRelMetadataProvider(conf);
    List<RelMetadataProvider> list = Lists.newArrayList();
    list.add(mdProvider.getMetadataProvider());
    planner.registerMetadataProviders(list);
    RelMetadataProvider chainedProvider = ChainedRelMetadataProvider.of(list);
    final RelNode node = new DummyNode(cluster, cluster.traitSet());
    node.getCluster().setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, planner));
    planner.setRoot(node);
    planner.findBestExp();
    // Matches 2 times: one time the original node, one time the new node created by the rule
    assertEquals(2, DummyRule.INSTANCE.numberMatches);
    // It is fired only once: on the original node
    assertEquals(1, DummyRule.INSTANCE.numberOnMatch);
}
Also used: RelOptCluster (org.apache.calcite.plan.RelOptCluster), CachingRelMetadataProvider (org.apache.calcite.rel.metadata.CachingRelMetadataProvider), HepProgramBuilder (org.apache.calcite.plan.hep.HepProgramBuilder), HepPlanner (org.apache.calcite.plan.hep.HepPlanner), RelOptRule (org.apache.calcite.plan.RelOptRule), HiveRulesRegistry (org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRulesRegistry), AbstractRelNode (org.apache.calcite.rel.AbstractRelNode), RelNode (org.apache.calcite.rel.RelNode), JavaTypeFactoryImpl (org.apache.calcite.jdbc.JavaTypeFactoryImpl), RexBuilder (org.apache.calcite.rex.RexBuilder), ChainedRelMetadataProvider (org.apache.calcite.rel.metadata.ChainedRelMetadataProvider), RelMetadataProvider (org.apache.calcite.rel.metadata.RelMetadataProvider), HiveConf (org.apache.hadoop.hive.conf.HiveConf), Test (org.junit.Test)
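
DummyRule and DummyNode are test-local classes. A minimal sketch of the same HepPlanner wiring using only plain Calcite (NoOpRule is an invented stand-in for DummyRule; the Hive registry and metadata providers are omitted):

import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.RelOptRuleCall;
import org.apache.calcite.plan.hep.HepMatchOrder;
import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rex.RexBuilder;

public class HepPlannerSketch {
    // Invented no-op rule: matches every RelNode but transforms nothing,
    // so the planner terminates after one pass.
    static class NoOpRule extends RelOptRule {
        NoOpRule() {
            super(operand(RelNode.class, any()), "NoOpRule");
        }
        @Override
        public void onMatch(RelOptRuleCall call) {
            // A real rule would call call.transformTo(newRel) here.
        }
    }

    public static void main(String[] args) {
        HepProgramBuilder programBuilder = new HepProgramBuilder();
        programBuilder.addMatchOrder(HepMatchOrder.TOP_DOWN);
        programBuilder.addRuleInstance(new NoOpRule());
        HepPlanner planner = new HepPlanner(programBuilder.build());
        RelOptCluster cluster =
                RelOptCluster.create(planner, new RexBuilder(new JavaTypeFactoryImpl()));
        // With a RelNode built on this cluster, planner.setRoot(node) followed
        // by planner.findBestExp() would run the program, as the test does.
        System.out.println(cluster.getPlanner().getClass().getSimpleName());
    }
}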

Aggregations

JavaTypeFactoryImpl (org.apache.calcite.jdbc.JavaTypeFactoryImpl): 16
Table (org.apache.calcite.schema.Table): 8
JavaTypeFactory (org.apache.calcite.adapter.java.JavaTypeFactory): 7
RelDataType (org.apache.calcite.rel.type.RelDataType): 6
RexBuilder (org.apache.calcite.rex.RexBuilder): 6
SchemaPlus (org.apache.calcite.schema.SchemaPlus): 6
StreamableTable (org.apache.calcite.schema.StreamableTable): 6
RelNode (org.apache.calcite.rel.RelNode): 5
Test (org.junit.Test): 5
ArrayList (java.util.ArrayList): 4
RelOptCluster (org.apache.calcite.plan.RelOptCluster): 4
RelDataTypeField (org.apache.calcite.rel.type.RelDataTypeField): 4
QueryPlanner (org.apache.storm.sql.planner.trident.QueryPlanner): 4
TridentRel (org.apache.storm.sql.planner.trident.rel.TridentRel): 4
RelDataTypeFactory (org.apache.calcite.rel.type.RelDataTypeFactory): 3
CompilerUtil (org.apache.storm.sql.compiler.CompilerUtil): 3
HashSet (java.util.HashSet): 2
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2
DruidSchema (org.apache.calcite.adapter.druid.DruidSchema): 2
DruidTable (org.apache.calcite.adapter.druid.DruidTable): 2