
Example 1 with LogicalTableScan

Use of org.apache.calcite.rel.logical.LogicalTableScan in project samza by apache.

The class TestQueryPlanner, method testRemoteJoinWithUdfAndFilterHelper:

void testRemoteJoinWithUdfAndFilterHelper(boolean enableOptimizer) throws SamzaSqlValidatorException {
    Map<String, String> staticConfigs = SamzaSqlTestConfig.fetchStaticConfigsWithFactories(1);
    String sql = "Insert into testavro.enrichedPageViewTopic " + "select pv.pageKey as __key__, pv.pageKey as pageKey, coalesce(null, 'N/A') as companyName," + "       p.name as profileName, p.address as profileAddress " + "from testRemoteStore.Profile.`$table` as p " + "join testavro.PAGEVIEW as pv " + " on p.__key__ = BuildOutputRecord('id', pv.profileId)" + " where p.name = 'Mike' and pv.profileId = 1 and p.name = pv.pageKey";
    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMT, sql);
    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_ENABLE_PLAN_OPTIMIZER, Boolean.toString(enableOptimizer));
    Config samzaConfig = new MapConfig(staticConfigs);
    DslConverter dslConverter = new SamzaSqlDslConverterFactory().create(samzaConfig);
    Collection<RelRoot> relRoots = dslConverter.convertDsl(sql);
    /*
      Query plan without optimization:
      LogicalProject(__key__=[$9], pageKey=[$9], companyName=['N/A'], profileName=[$2], profileAddress=[$4])
        LogicalFilter(condition=[AND(=($2, 'Mike'), =($10, 1), =($2, $9))])  ==> Only the second condition could be pushed down.
          LogicalProject(__key__=[$0], id=[$1], name=[$2], companyId=[$3], address=[$4], selfEmployed=[$5],
                                  phoneNumbers=[$6], mapValues=[$7], __key__0=[$8], pageKey=[$9], profileId=[$10])
                                  ==> ProjectMergeRule removes this redundant node.
            LogicalJoin(condition=[=($0, $11)], joinType=[inner])
              LogicalTableScan(table=[[testRemoteStore, Profile, $table]])
              LogicalProject(__key__=[$0], pageKey=[$1], profileId=[$2], $f3=[BuildOutputRecord('id', $2)])  ==> Filter is pushed above project.
                LogicalTableScan(table=[[testavro, PAGEVIEW]])

      Query plan with optimization:
      LogicalProject(__key__=[$9], pageKey=[$9], companyName=['N/A'], profileName=[$2], profileAddress=[$4])
          LogicalFilter(condition=[AND(=($2, 'Mike'), =($2, $9))])
            LogicalJoin(condition=[=($0, $11)], joinType=[inner])
              LogicalTableScan(table=[[testRemoteStore, Profile, $table]])
              LogicalFilter(condition=[=($2, 1)])
                LogicalProject(__key__=[$0], pageKey=[$1], profileId=[$2], $f3=[BuildOutputRecord('id', $2)])
                  LogicalTableScan(table=[[testavro, PAGEVIEW]])
     */
    assertEquals(1, relRoots.size());
    RelRoot relRoot = relRoots.iterator().next();
    RelNode relNode = relRoot.rel;
    assertTrue(relNode instanceof LogicalProject);
    relNode = relNode.getInput(0);
    assertTrue(relNode instanceof LogicalFilter);
    if (enableOptimizer) {
        assertEquals("AND(=($2, $9), =($2, 'Mike'))", ((LogicalFilter) relNode).getCondition().toString());
    } else {
        assertEquals("AND(=($2, $9), =(1, $10), =($2, 'Mike'))", ((LogicalFilter) relNode).getCondition().toString());
    }
    relNode = relNode.getInput(0);
    if (enableOptimizer) {
        assertTrue(relNode instanceof LogicalJoin);
        assertEquals(2, relNode.getInputs().size());
    } else {
        assertTrue(relNode instanceof LogicalProject);
        relNode = relNode.getInput(0);
    }
    LogicalJoin join = (LogicalJoin) relNode;
    RelNode left = join.getLeft();
    RelNode right = join.getRight();
    assertTrue(left instanceof LogicalTableScan);
    if (enableOptimizer) {
        assertTrue(right instanceof LogicalFilter);
        assertEquals("=(1, $2)", ((LogicalFilter) right).getCondition().toString());
        relNode = right.getInput(0);
    } else {
        relNode = right;
    }
    assertTrue(relNode instanceof LogicalProject);
    relNode = relNode.getInput(0);
    assertTrue(relNode instanceof LogicalTableScan);
}
Also used:
org.apache.samza.sql.interfaces.DslConverter, org.apache.samza.sql.util.SamzaSqlTestConfig,
org.apache.samza.sql.runner.SamzaSqlApplicationConfig, org.apache.samza.config.Config,
org.apache.samza.config.MapConfig, org.apache.calcite.rel.logical.LogicalFilter,
org.apache.samza.sql.dsl.SamzaSqlDslConverterFactory, org.apache.calcite.rel.RelRoot,
org.apache.calcite.rel.logical.LogicalTableScan, org.apache.calcite.rel.RelNode,
org.apache.calcite.rel.logical.LogicalJoin, org.apache.calcite.rel.logical.LogicalProject
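
The helper above is deliberately not annotated with @Test: it is parameterized on enableOptimizer so that two thin wrappers can assert both plan shapes. A minimal sketch of such wrappers (the wrapper names here are illustrative, not necessarily the ones in TestQueryPlanner):

@Test
public void testRemoteJoinWithUdfAndFilter() throws SamzaSqlValidatorException {
    // Plan without the optimizer: the filter stays above the redundant project.
    testRemoteJoinWithUdfAndFilterHelper(false);
}

@Test
public void testRemoteJoinWithUdfAndFilterAndOptimizer() throws SamzaSqlValidatorException {
    // Plan with the optimizer: the pushable predicate moves below the join.
    testRemoteJoinWithUdfAndFilterHelper(true);
}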

Example 2 with LogicalTableScan

Use of org.apache.calcite.rel.logical.LogicalTableScan in project samza by apache.

The class TestQueryPlanner, method testRemoteJoinFilterPushDownWithUdfInFilterAndOptimizer:

@Test
public void testRemoteJoinFilterPushDownWithUdfInFilterAndOptimizer() throws SamzaSqlValidatorException {
    Map<String, String> staticConfigs = SamzaSqlTestConfig.fetchStaticConfigsWithFactories(1);
    String sql = "Insert into testavro.enrichedPageViewTopic " + "select pv.pageKey as __key__, pv.pageKey as pageKey, coalesce(null, 'N/A') as companyName," + "       p.name as profileName, p.address as profileAddress " + "from testRemoteStore.Profile.`$table` as p " + "join testavro.PAGEVIEW as pv " + " on p.__key__ = pv.profileId" + " where p.name = pv.pageKey AND p.name = 'Mike' AND pv.profileId = MyTest(pv.profileId)";
    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMT, sql);
    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_ENABLE_PLAN_OPTIMIZER, Boolean.toString(true));
    Config samzaConfig = new MapConfig(staticConfigs);
    DslConverter dslConverter = new SamzaSqlDslConverterFactory().create(samzaConfig);
    Collection<RelRoot> relRoots = dslConverter.convertDsl(sql);
    /*
      Query plan without optimization:
      LogicalProject(__key__=[$9], pageKey=[$9], companyName=['N/A'], profileName=[$2], profileAddress=[$4])
        LogicalFilter(condition=[AND(=($2, $9), =($2, 'Mike'), =($10, CAST(MyTest($10)):INTEGER))])
          LogicalJoin(condition=[=($0, $10)], joinType=[inner])
            LogicalTableScan(table=[[testRemoteStore, Profile, $table]])
            LogicalTableScan(table=[[testavro, PAGEVIEW]])

      Query plan with optimization:
      LogicalProject(__key__=[$9], pageKey=[$9], companyName=['N/A'], profileName=[$2], profileAddress=[$4])
        LogicalFilter(condition=[AND(=($2, $9), =($2, 'Mike'))])
          LogicalJoin(condition=[=($0, $10)], joinType=[inner])
            LogicalTableScan(table=[[testRemoteStore, Profile, $table]])
            LogicalFilter(condition=[=($2, CAST(MyTest($2)):INTEGER)])
              LogicalTableScan(table=[[testavro, PAGEVIEW]])
     */
    assertEquals(1, relRoots.size());
    RelRoot relRoot = relRoots.iterator().next();
    RelNode relNode = relRoot.rel;
    assertTrue(relNode instanceof LogicalProject);
    relNode = relNode.getInput(0);
    assertTrue(relNode instanceof LogicalFilter);
    assertEquals("AND(=($2, $9), =($2, 'Mike'))", ((LogicalFilter) relNode).getCondition().toString());
    relNode = relNode.getInput(0);
    assertTrue(relNode instanceof LogicalJoin);
    assertEquals(2, relNode.getInputs().size());
    LogicalJoin join = (LogicalJoin) relNode;
    RelNode left = join.getLeft();
    RelNode right = join.getRight();
    assertTrue(left instanceof LogicalTableScan);
    assertTrue(right instanceof LogicalFilter);
    assertEquals("=($2, CAST(MyTest($2)):INTEGER)", ((LogicalFilter) right).getCondition().toString());
    assertTrue(right.getInput(0) instanceof LogicalTableScan);
}
Also used:
org.apache.samza.sql.interfaces.DslConverter, org.apache.samza.sql.util.SamzaSqlTestConfig,
org.apache.samza.sql.runner.SamzaSqlApplicationConfig, org.apache.samza.config.Config,
org.apache.samza.config.MapConfig, org.apache.calcite.rel.logical.LogicalFilter,
org.apache.samza.sql.dsl.SamzaSqlDslConverterFactory, org.apache.calcite.rel.RelRoot,
org.apache.calcite.rel.logical.LogicalTableScan, org.apache.calcite.rel.RelNode,
org.apache.calcite.rel.logical.LogicalJoin, org.apache.calcite.rel.logical.LogicalProject,
org.junit.Test
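
The plan trees quoted in the comments can be reproduced directly: Calcite's RelOptUtil.toString renders a RelNode tree in exactly this indented form. A small sketch, assuming the relRoots collection from the test above:

import org.apache.calcite.plan.RelOptUtil;

// Dump each converted plan in the indented form shown in the comments.
for (RelRoot root : relRoots) {
    System.out.println(RelOptUtil.toString(root.rel));
}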

Example 3 with LogicalTableScan

Use of org.apache.calcite.rel.logical.LogicalTableScan in project flink by apache.

The class PushPartitionIntoTableSourceScanRule, method onMatch:

@Override
public void onMatch(RelOptRuleCall call) {
    Filter filter = call.rel(0);
    LogicalTableScan scan = call.rel(1);
    TableSourceTable tableSourceTable = scan.getTable().unwrap(TableSourceTable.class);
    RelDataType inputFieldTypes = filter.getInput().getRowType();
    List<String> inputFieldNames = inputFieldTypes.getFieldNames();
    List<String> partitionFieldNames = tableSourceTable.contextResolvedTable().<ResolvedCatalogTable>getResolvedTable().getPartitionKeys();
    // extract partition predicates
    RelBuilder relBuilder = call.builder();
    RexBuilder rexBuilder = relBuilder.getRexBuilder();
    Tuple2<Seq<RexNode>, Seq<RexNode>> allPredicates =
        RexNodeExtractor.extractPartitionPredicateList(
            filter.getCondition(),
            FlinkRelOptUtil.getMaxCnfNodeCount(scan),
            inputFieldNames.toArray(new String[0]),
            rexBuilder,
            partitionFieldNames.toArray(new String[0]));
    RexNode partitionPredicate = RexUtil.composeConjunction(rexBuilder, JavaConversions.seqAsJavaList(allPredicates._1));
    if (partitionPredicate.isAlwaysTrue()) {
        return;
    }
    // build pruner
    LogicalType[] partitionFieldTypes = partitionFieldNames.stream().map(name -> {
        int index = inputFieldNames.indexOf(name);
        if (index < 0) {
            throw new TableException(String.format("Partitioned key '%s' isn't found in input columns. " + "Validator should have checked that.", name));
        }
        return inputFieldTypes.getFieldList().get(index).getType();
    }).map(FlinkTypeFactory::toLogicalType).toArray(LogicalType[]::new);
    RexNode finalPartitionPredicate = adjustPartitionPredicate(inputFieldNames, partitionFieldNames, partitionPredicate);
    FlinkContext context = ShortcutUtils.unwrapContext(scan);
    Function<List<Map<String, String>>, List<Map<String, String>>> defaultPruner =
        partitions -> PartitionPruner.prunePartitions(
            context.getTableConfig(),
            partitionFieldNames.toArray(new String[0]),
            partitionFieldTypes,
            partitions,
            finalPartitionPredicate);
    // prune partitions
    List<Map<String, String>> remainingPartitions = readPartitionsAndPrune(rexBuilder, context, tableSourceTable, defaultPruner, allPredicates._1(), inputFieldNames);
    // apply push down
    DynamicTableSource dynamicTableSource = tableSourceTable.tableSource().copy();
    PartitionPushDownSpec partitionPushDownSpec = new PartitionPushDownSpec(remainingPartitions);
    partitionPushDownSpec.apply(dynamicTableSource, SourceAbilityContext.from(scan));
    // build new statistic
    TableStats newTableStat = null;
    if (tableSourceTable.contextResolvedTable().isPermanent()) {
        ObjectIdentifier identifier = tableSourceTable.contextResolvedTable().getIdentifier();
        ObjectPath tablePath = identifier.toObjectPath();
        Catalog catalog = tableSourceTable.contextResolvedTable().getCatalog().get();
        for (Map<String, String> partition : remainingPartitions) {
            Optional<TableStats> partitionStats = getPartitionStats(catalog, tablePath, partition);
            if (!partitionStats.isPresent()) {
                // clear all information before
                newTableStat = null;
                break;
            } else {
                newTableStat = newTableStat == null ? partitionStats.get() : newTableStat.merge(partitionStats.get());
            }
        }
    }
    FlinkStatistic newStatistic = FlinkStatistic.builder().statistic(tableSourceTable.getStatistic()).tableStats(newTableStat).build();
    TableSourceTable newTableSourceTable = tableSourceTable.copy(dynamicTableSource, newStatistic, new SourceAbilitySpec[] { partitionPushDownSpec });
    LogicalTableScan newScan = LogicalTableScan.create(scan.getCluster(), newTableSourceTable, scan.getHints());
    // transform to new node
    RexNode nonPartitionPredicate = RexUtil.composeConjunction(rexBuilder, JavaConversions.seqAsJavaList(allPredicates._2()));
    if (nonPartitionPredicate.isAlwaysTrue()) {
        call.transformTo(newScan);
    } else {
        Filter newFilter = filter.copy(filter.getTraitSet(), newScan, nonPartitionPredicate);
        call.transformTo(newFilter);
    }
}
Also used:
org.apache.flink.table.catalog.stats.CatalogColumnStatistics, java.util.Arrays,
org.apache.flink.table.planner.plan.abilities.source.SourceAbilityContext,
org.apache.flink.table.catalog.exceptions.PartitionNotExistException,
org.apache.flink.table.catalog.CatalogTable, org.apache.flink.table.planner.utils.ShortcutUtils,
org.apache.flink.table.connector.source.abilities.SupportsPartitionPushDown,
org.apache.flink.table.planner.calcite.FlinkTypeFactory, org.apache.calcite.rex.RexUtil,
org.apache.calcite.rex.RexNode, org.apache.calcite.tools.RelBuilder,
org.apache.flink.table.expressions.ResolvedExpression, java.util.Map,
org.apache.flink.table.plan.stats.TableStats, org.apache.flink.table.connector.source.DynamicTableSource,
org.apache.flink.table.planner.plan.abilities.source.PartitionPushDownSpec, java.util.TimeZone,
scala.collection.Seq, org.apache.flink.table.planner.calcite.FlinkContext, scala.Tuple2,
java.util.stream.Collectors, org.apache.flink.table.planner.plan.abilities.source.SourceAbilitySpec,
org.apache.calcite.rex.RexInputRef, java.util.List,
org.apache.flink.table.catalog.exceptions.TableNotPartitionedException,
org.apache.flink.table.types.logical.LogicalType, java.util.Optional,
org.apache.flink.table.planner.plan.utils.RexNodeToExpressionConverter,
org.apache.calcite.rel.logical.LogicalTableScan, org.apache.flink.table.catalog.ObjectIdentifier,
org.apache.flink.table.planner.utils.CatalogTableStatisticsConverter,
org.apache.flink.table.planner.plan.utils.RexNodeExtractor, org.apache.flink.table.expressions.Expression,
org.apache.calcite.rel.core.Filter, org.apache.flink.table.catalog.ObjectPath, java.util.function.Function,
java.util.ArrayList, org.apache.flink.table.catalog.stats.CatalogTableStatistics,
org.apache.flink.table.catalog.Catalog, org.apache.flink.table.planner.plan.utils.PartitionPruner,
org.apache.flink.table.catalog.ResolvedCatalogTable, org.apache.calcite.rel.type.RelDataType,
scala.collection.JavaConversions, org.apache.flink.table.catalog.exceptions.TableNotExistException,
org.apache.calcite.rex.RexBuilder, org.apache.flink.table.api.TableException, scala.Option,
org.apache.flink.table.planner.plan.utils.FlinkRelOptUtil,
org.apache.flink.table.planner.plan.schema.TableSourceTable, org.apache.calcite.plan.RelOptRuleCall,
org.apache.flink.table.catalog.CatalogPartitionSpec, org.apache.calcite.plan.RelOptRule,
org.apache.flink.table.planner.plan.stats.FlinkStatistic, org.apache.calcite.rex.RexShuttle,
org.apache.flink.table.catalog.exceptions.CatalogException
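
A rule like this only fires once it is registered with a planner. A generic sketch of driving a single rule over a plan with Calcite's HepPlanner (the INSTANCE field is an assumption; Flink actually wires its rules through its own rule sets):

import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgram;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rel.RelNode;

// Build a one-rule program and rewrite the plan with it.
HepProgram program = new HepProgramBuilder()
        .addRuleInstance(PushPartitionIntoTableSourceScanRule.INSTANCE) // assumed singleton field
        .build();
HepPlanner planner = new HepPlanner(program);
planner.setRoot(originalPlan); // originalPlan: the RelNode tree to rewrite
RelNode optimized = planner.findBestExp();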

Example 4 with LogicalTableScan

Use of org.apache.calcite.rel.logical.LogicalTableScan in project flink by apache.

The class PushProjectIntoTableSourceScanRule, method onMatch:

@Override
public void onMatch(RelOptRuleCall call) {
    final LogicalProject project = call.rel(0);
    final LogicalTableScan scan = call.rel(1);
    final TableSourceTable sourceTable = scan.getTable().unwrap(TableSourceTable.class);
    final boolean supportsNestedProjection = supportsNestedProjection(sourceTable.tableSource());
    final int[] refFields = RexNodeExtractor.extractRefInputFields(project.getProjects());
    if (!supportsNestedProjection && refFields.length == scan.getRowType().getFieldCount()) {
        // There is no top-level projection and nested projections aren't supported.
        return;
    }
    final FlinkTypeFactory typeFactory = unwrapTypeFactory(scan);
    final ResolvedSchema schema = sourceTable.contextResolvedTable().getResolvedSchema();
    final RowType producedType = createProducedType(schema, sourceTable.tableSource());
    final NestedSchema projectedSchema =
        NestedProjectionUtil.build(
            getProjections(project, scan), typeFactory.buildRelNodeRowType(producedType));
    if (!supportsNestedProjection) {
        for (NestedColumn column : projectedSchema.columns().values()) {
            column.markLeaf();
        }
    }
    final List<SourceAbilitySpec> abilitySpecs = new ArrayList<>();
    final RowType newProducedType = performPushDown(sourceTable, projectedSchema, producedType, abilitySpecs);
    final DynamicTableSource newTableSource = sourceTable.tableSource().copy();
    final SourceAbilityContext context = SourceAbilityContext.from(scan);
    abilitySpecs.forEach(spec -> spec.apply(newTableSource, context));
    final RelDataType newRowType = typeFactory.buildRelNodeRowType(newProducedType);
    final TableSourceTable newSource = sourceTable.copy(newTableSource, newRowType, abilitySpecs.toArray(new SourceAbilitySpec[0]));
    final LogicalTableScan newScan = new LogicalTableScan(scan.getCluster(), scan.getTraitSet(), scan.getHints(), newSource);
    final LogicalProject newProject =
        project.copy(
            project.getTraitSet(),
            newScan,
            rewriteProjections(call, newSource, projectedSchema),
            project.getRowType());
    if (ProjectRemoveRule.isTrivial(newProject)) {
        call.transformTo(newScan);
    } else {
        call.transformTo(newProject);
    }
}
Also used:
org.apache.flink.table.planner.plan.abilities.source.SourceAbilitySpec, java.util.ArrayList,
org.apache.flink.table.types.logical.RowType, org.apache.flink.table.planner.plan.utils.NestedColumn,
org.apache.calcite.rel.type.RelDataType, org.apache.calcite.rel.logical.LogicalTableScan,
org.apache.flink.table.planner.calcite.FlinkTypeFactory,
org.apache.flink.table.planner.plan.abilities.source.SourceAbilityContext,
org.apache.calcite.rel.logical.LogicalProject, org.apache.flink.table.planner.plan.schema.TableSourceTable,
org.apache.flink.table.catalog.ResolvedSchema, org.apache.flink.table.connector.source.DynamicTableSource,
org.apache.flink.table.planner.plan.utils.NestedSchema
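
Note that this rule constructs the new scan directly, while Example 3 goes through the static factory. Both forms exist on LogicalTableScan; the factory derives the trait set from the cluster, whereas the constructor takes it explicitly. A side-by-side sketch, reusing scan and newSource from the method above:

// Factory form: the trait set is derived from the cluster.
LogicalTableScan viaFactory =
        LogicalTableScan.create(scan.getCluster(), newSource, scan.getHints());

// Constructor form: the caller supplies the trait set itself.
LogicalTableScan viaConstructor =
        new LogicalTableScan(scan.getCluster(), scan.getTraitSet(), scan.getHints(), newSource);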

Example 5 with LogicalTableScan

Use of org.apache.calcite.rel.logical.LogicalTableScan in project flink by apache.

The class PushProjectIntoTableSourceScanRule, method matches:

@Override
public boolean matches(RelOptRuleCall call) {
    final LogicalTableScan scan = call.rel(1);
    final TableSourceTable sourceTable = scan.getTable().unwrap(TableSourceTable.class);
    if (sourceTable == null) {
        return false;
    }
    final DynamicTableSource source = sourceTable.tableSource();
    // The source supports projection push-down.
    if (supportsProjectionPushDown(source)) {
        return Arrays.stream(sourceTable.abilitySpecs()).noneMatch(spec -> spec instanceof ProjectPushDownSpec);
    }
    // The source supports metadata; projection push-down
    // (for physical columns) is not supported.
    if (supportsMetadata(source)) {
        if (Arrays.stream(sourceTable.abilitySpecs()).anyMatch(spec -> spec instanceof ReadingMetadataSpec)) {
            return false;
        }
        return ((SupportsReadingMetadata) source).supportsMetadataProjection();
    }
    return false;
}
Also used:
org.apache.flink.table.planner.plan.abilities.source.ProjectPushDownSpec,
org.apache.flink.table.connector.source.abilities.SupportsReadingMetadata,
org.apache.flink.table.planner.plan.schema.TableSourceTable,
org.apache.flink.table.planner.plan.abilities.source.ReadingMetadataSpec,
org.apache.calcite.rel.logical.LogicalTableScan, org.apache.flink.table.connector.source.DynamicTableSource
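
For orientation, the LogicalTableScan nodes all of these rules match are what Calcite produces for a plain table reference before any optimization. A minimal sketch with RelBuilder, assuming an existing FrameworkConfig whose schema contains a table named ORDERS:

import org.apache.calcite.rel.RelNode;
import org.apache.calcite.tools.RelBuilder;

// RelBuilder.scan(...) produces a LogicalTableScan for the named table.
RelBuilder builder = RelBuilder.create(config); // config: an existing FrameworkConfig
RelNode scan = builder.scan("ORDERS").build();
// scan is a LogicalTableScan until planner rules transform it.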

Aggregations

LogicalTableScan (org.apache.calcite.rel.logical.LogicalTableScan): 23 usages
RelDataType (org.apache.calcite.rel.type.RelDataType): 10
TableSourceTable (org.apache.flink.table.planner.plan.schema.TableSourceTable): 8
LogicalProject (org.apache.calcite.rel.logical.LogicalProject): 7
RexBuilder (org.apache.calcite.rex.RexBuilder): 7
RelNode (org.apache.calcite.rel.RelNode): 6
LogicalFilter (org.apache.calcite.rel.logical.LogicalFilter): 5
Filter (org.apache.calcite.rel.core.Filter): 4
LogicalJoin (org.apache.calcite.rel.logical.LogicalJoin): 4
RelMetadataQuery (org.apache.calcite.rel.metadata.RelMetadataQuery): 4
RexNode (org.apache.calcite.rex.RexNode): 4
RelBuilder (org.apache.calcite.tools.RelBuilder): 4
ArrayList (java.util.ArrayList): 3
JavaTypeFactoryImpl (org.apache.calcite.jdbc.JavaTypeFactoryImpl): 3
RelOptCluster (org.apache.calcite.plan.RelOptCluster): 3
RelOptTable (org.apache.calcite.plan.RelOptTable): 3
HepPlanner (org.apache.calcite.plan.hep.HepPlanner): 3
HepProgramBuilder (org.apache.calcite.plan.hep.HepProgramBuilder): 3
DynamicTableSource (org.apache.flink.table.connector.source.DynamicTableSource): 3
Before (org.junit.Before): 3