
Example 6 with TableSourceTable

Use of org.apache.flink.table.planner.plan.schema.TableSourceTable in project flink by apache.

From class PushLocalAggIntoScanRuleBase, method pushLocalAggregateIntoScan:

protected void pushLocalAggregateIntoScan(RelOptRuleCall call, BatchPhysicalGroupAggregateBase localAgg, BatchPhysicalTableSourceScan oldScan, int[] calcRefFields) {
    RowType inputType = FlinkTypeFactory.toLogicalRowType(oldScan.getRowType());
    List<int[]> groupingSets = Collections.singletonList(ArrayUtils.addAll(localAgg.grouping(), localAgg.auxGrouping()));
    List<AggregateCall> aggCallList = JavaScalaConversionUtil.toJava(localAgg.getAggCallList());
    // Map arg indices in the aggregate to field indices in the scan via the fields referenced by the Calc.
    if (calcRefFields != null) {
        groupingSets = translateGroupingArgIndex(groupingSets, calcRefFields);
        aggCallList = translateAggCallArgIndex(aggCallList, calcRefFields);
    }
    RowType producedType = FlinkTypeFactory.toLogicalRowType(localAgg.getRowType());
    TableSourceTable oldTableSourceTable = oldScan.tableSourceTable();
    DynamicTableSource newTableSource = oldScan.tableSource().copy();
    boolean isPushDownSuccess = AggregatePushDownSpec.apply(inputType, groupingSets, aggCallList, producedType, newTableSource, SourceAbilityContext.from(oldScan));
    if (!isPushDownSuccess) {
        // Aggregate push-down failed; return without changing any nodes.
        return;
    }
    // create new source table with new spec and statistic.
    AggregatePushDownSpec aggregatePushDownSpec = new AggregatePushDownSpec(inputType, groupingSets, aggCallList, producedType);
    TableSourceTable newTableSourceTable = oldTableSourceTable.copy(newTableSource, localAgg.getRowType(), new SourceAbilitySpec[] { aggregatePushDownSpec }).copy(FlinkStatistic.UNKNOWN());
    // transform to new nodes.
    BatchPhysicalTableSourceScan newScan = oldScan.copy(oldScan.getTraitSet(), newTableSourceTable);
    BatchPhysicalExchange oldExchange = call.rel(0);
    BatchPhysicalExchange newExchange = oldExchange.copy(oldExchange.getTraitSet(), newScan, oldExchange.getDistribution());
    call.transformTo(newExchange);
}
Also used : AggregateCall(org.apache.calcite.rel.core.AggregateCall) SourceAbilitySpec(org.apache.flink.table.planner.plan.abilities.source.SourceAbilitySpec) AggregatePushDownSpec(org.apache.flink.table.planner.plan.abilities.source.AggregatePushDownSpec) RowType(org.apache.flink.table.types.logical.RowType) TableSourceTable(org.apache.flink.table.planner.plan.schema.TableSourceTable) BatchPhysicalExchange(org.apache.flink.table.planner.plan.nodes.physical.batch.BatchPhysicalExchange) DynamicTableSource(org.apache.flink.table.connector.source.DynamicTableSource) BatchPhysicalTableSourceScan(org.apache.flink.table.planner.plan.nodes.physical.batch.BatchPhysicalTableSourceScan)
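
The branch guarded by calcRefFields above remaps indices because the aggregate sits on top of a Calc rather than directly on the scan. The remapping itself is simple; here is a minimal, self-contained sketch of the idea (the translate helper and the numbers are hypothetical, not the rule's actual translateAggCallArgIndex):

import java.util.Arrays;

/** Illustrates remapping aggregate arg indices through the fields a Calc references. */
public class ArgIndexTranslation {

    // calcRefFields[i] is the scan field that position i of the Calc output reads,
    // so an aggregate referring to Calc output index i really refers to scan
    // field calcRefFields[i].
    static int[] translate(int[] argIndices, int[] calcRefFields) {
        return Arrays.stream(argIndices).map(i -> calcRefFields[i]).toArray();
    }

    public static void main(String[] args) {
        // The Calc projects scan fields 2 and 5; the aggregate groups on Calc field 1.
        int[] calcRefFields = { 2, 5 };
        System.out.println(Arrays.toString(translate(new int[] { 1 }, calcRefFields)));
        // -> [5]: the grouping actually happens on scan field 5.
    }
}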

Example 7 with TableSourceTable

Use of org.apache.flink.table.planner.plan.schema.TableSourceTable in project flink by apache.

From class PushProjectIntoTableSourceScanRule, method onMatch:

@Override
public void onMatch(RelOptRuleCall call) {
    final LogicalProject project = call.rel(0);
    final LogicalTableScan scan = call.rel(1);
    final TableSourceTable sourceTable = scan.getTable().unwrap(TableSourceTable.class);
    final boolean supportsNestedProjection = supportsNestedProjection(sourceTable.tableSource());
    final int[] refFields = RexNodeExtractor.extractRefInputFields(project.getProjects());
    if (!supportsNestedProjection && refFields.length == scan.getRowType().getFieldCount()) {
        // There is no top-level projection and nested projections aren't supported.
        return;
    }
    final FlinkTypeFactory typeFactory = unwrapTypeFactory(scan);
    final ResolvedSchema schema = sourceTable.contextResolvedTable().getResolvedSchema();
    final RowType producedType = createProducedType(schema, sourceTable.tableSource());
    final NestedSchema projectedSchema = NestedProjectionUtil.build(getProjections(project, scan), typeFactory.buildRelNodeRowType(producedType));
    if (!supportsNestedProjection) {
        for (NestedColumn column : projectedSchema.columns().values()) {
            column.markLeaf();
        }
    }
    final List<SourceAbilitySpec> abilitySpecs = new ArrayList<>();
    final RowType newProducedType = performPushDown(sourceTable, projectedSchema, producedType, abilitySpecs);
    final DynamicTableSource newTableSource = sourceTable.tableSource().copy();
    final SourceAbilityContext context = SourceAbilityContext.from(scan);
    abilitySpecs.forEach(spec -> spec.apply(newTableSource, context));
    final RelDataType newRowType = typeFactory.buildRelNodeRowType(newProducedType);
    final TableSourceTable newSource = sourceTable.copy(newTableSource, newRowType, abilitySpecs.toArray(new SourceAbilitySpec[0]));
    final LogicalTableScan newScan = new LogicalTableScan(scan.getCluster(), scan.getTraitSet(), scan.getHints(), newSource);
    final LogicalProject newProject = project.copy(project.getTraitSet(), newScan, rewriteProjections(call, newSource, projectedSchema), project.getRowType());
    if (ProjectRemoveRule.isTrivial(newProject)) {
        call.transformTo(newScan);
    } else {
        call.transformTo(newProject);
    }
}
Also used : SourceAbilitySpec(org.apache.flink.table.planner.plan.abilities.source.SourceAbilitySpec) ArrayList(java.util.ArrayList) RowType(org.apache.flink.table.types.logical.RowType) NestedColumn(org.apache.flink.table.planner.plan.utils.NestedColumn) RelDataType(org.apache.calcite.rel.type.RelDataType) LogicalTableScan(org.apache.calcite.rel.logical.LogicalTableScan) FlinkTypeFactory(org.apache.flink.table.planner.calcite.FlinkTypeFactory) SourceAbilityContext(org.apache.flink.table.planner.plan.abilities.source.SourceAbilityContext) LogicalProject(org.apache.calcite.rel.logical.LogicalProject) TableSourceTable(org.apache.flink.table.planner.plan.schema.TableSourceTable) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) DynamicTableSource(org.apache.flink.table.connector.source.DynamicTableSource) NestedSchema(org.apache.flink.table.planner.plan.utils.NestedSchema)
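
The rule only rewrites the plan; the actual column pruning happens inside the connector. A minimal sketch of the source side, assuming Flink's SupportsProjectionPushDown ability with the applyProjection variant that also receives the produced DataType (the class name and stored fields are illustrative):

import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.connector.source.ScanTableSource;
import org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown;
import org.apache.flink.table.types.DataType;

/** Hypothetical source that prunes columns when the planner pushes a projection. */
public class MyProjectableSource implements ScanTableSource, SupportsProjectionPushDown {

    // Each int[] is a field path: { 2 } is top-level field 2, { 2, 0 } would be
    // the first nested field inside it (only when nested projection is supported).
    private int[][] projectedFields;
    private DataType producedDataType;

    @Override
    public boolean supportsNestedProjection() {
        // false => the rule above marks every column as a leaf, so only
        // top-level fields are pruned.
        return false;
    }

    @Override
    public void applyProjection(int[][] projectedFields, DataType producedDataType) {
        // Remember what the planner pushed down; the runtime reader would use
        // this to emit only the requested fields.
        this.projectedFields = projectedFields;
        this.producedDataType = producedDataType;
    }

    @Override
    public ChangelogMode getChangelogMode() {
        return ChangelogMode.insertOnly();
    }

    @Override
    public ScanRuntimeProvider getScanRuntimeProvider(ScanContext context) {
        throw new UnsupportedOperationException("sketch only");
    }

    @Override
    public DynamicTableSource copy() {
        MyProjectableSource copy = new MyProjectableSource();
        copy.projectedFields = projectedFields;
        copy.producedDataType = producedDataType;
        return copy;
    }

    @Override
    public String asSummaryString() {
        return "MyProjectableSource";
    }
}

Note how copy() carries over the pushed state: the rule above works on sourceTable.tableSource().copy() and applies the ability specs to that copy, leaving the original source untouched.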

Example 8 with TableSourceTable

Use of org.apache.flink.table.planner.plan.schema.TableSourceTable in project flink by apache.

From class PushProjectIntoTableSourceScanRule, method matches:

@Override
public boolean matches(RelOptRuleCall call) {
    final LogicalTableScan scan = call.rel(1);
    final TableSourceTable sourceTable = scan.getTable().unwrap(TableSourceTable.class);
    if (sourceTable == null) {
        return false;
    }
    final DynamicTableSource source = sourceTable.tableSource();
    // The source supports projection push-down.
    if (supportsProjectionPushDown(source)) {
        return Arrays.stream(sourceTable.abilitySpecs()).noneMatch(spec -> spec instanceof ProjectPushDownSpec);
    }
    // The source supports reading metadata, so a metadata projection can still
    // be pushed even if projection push-down (for physical columns) is not supported.
    if (supportsMetadata(source)) {
        if (Arrays.stream(sourceTable.abilitySpecs()).anyMatch(spec -> spec instanceof ReadingMetadataSpec)) {
            return false;
        }
        return ((SupportsReadingMetadata) source).supportsMetadataProjection();
    }
    return false;
}
Also used : ProjectPushDownSpec(org.apache.flink.table.planner.plan.abilities.source.ProjectPushDownSpec) SupportsReadingMetadata(org.apache.flink.table.connector.source.abilities.SupportsReadingMetadata) TableSourceTable(org.apache.flink.table.planner.plan.schema.TableSourceTable) ReadingMetadataSpec(org.apache.flink.table.planner.plan.abilities.source.ReadingMetadataSpec) LogicalTableScan(org.apache.calcite.rel.logical.LogicalTableScan) DynamicTableSource(org.apache.flink.table.connector.source.DynamicTableSource)
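
The second branch above deals with sources that expose metadata columns. A minimal sketch of that side, building on the hypothetical MyProjectableSource from the previous example; listReadableMetadata, applyReadableMetadata, and supportsMetadataProjection are the methods of Flink's SupportsReadingMetadata ability (the metadata key and type are made up):

import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.connector.source.abilities.SupportsReadingMetadata;
import org.apache.flink.table.types.DataType;

/** Hypothetical source exposing one metadata column. */
public class MyMetadataSource extends MyProjectableSource implements SupportsReadingMetadata {

    private List<String> metadataKeys = Collections.emptyList();

    @Override
    public Map<String, DataType> listReadableMetadata() {
        // Declares which metadata keys exist and their data types.
        return Collections.singletonMap("ingestion-ts", DataTypes.TIMESTAMP_LTZ(3));
    }

    @Override
    public void applyReadableMetadata(List<String> metadataKeys, DataType producedDataType) {
        // Only the metadata columns that survived the projection are requested.
        this.metadataKeys = metadataKeys;
    }

    // The inherited default supportsMetadataProjection() is assumed to return
    // true, which is what the rule's matches() checks before accepting a
    // metadata-only rewrite.
}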

Example 9 with TableSourceTable

Use of org.apache.flink.table.planner.plan.schema.TableSourceTable in project flink by apache.

From class PushProjectIntoTableSourceScanRule, method performPushDown:

private RowType performPushDown(TableSourceTable source, NestedSchema projectedSchema, RowType producedType, List<SourceAbilitySpec> abilitySpecs) {
    final int numPhysicalColumns;
    final List<NestedColumn> projectedMetadataColumns;
    if (supportsMetadata(source.tableSource())) {
        final List<String> declaredMetadataKeys = createRequiredMetadataKeys(source.contextResolvedTable().getResolvedSchema(), source.tableSource());
        numPhysicalColumns = producedType.getFieldCount() - declaredMetadataKeys.size();
        projectedMetadataColumns =
                IntStream.range(0, declaredMetadataKeys.size())
                        .mapToObj(i -> producedType.getFieldNames().get(numPhysicalColumns + i))
                        .map(fieldName -> projectedSchema.columns().get(fieldName))
                        .filter(Objects::nonNull)
                        .collect(Collectors.toList());
    } else {
        numPhysicalColumns = producedType.getFieldCount();
        projectedMetadataColumns = Collections.emptyList();
    }
    final int[][] physicalProjections;
    if (supportsProjectionPushDown(source.tableSource())) {
        projectedMetadataColumns.forEach(metaColumn -> projectedSchema.columns().remove(metaColumn.name()));
        physicalProjections = NestedProjectionUtil.convertToIndexArray(projectedSchema);
        projectedMetadataColumns.forEach(metaColumn -> projectedSchema.columns().put(metaColumn.name(), metaColumn));
    } else {
        physicalProjections = IntStream.range(0, numPhysicalColumns).mapToObj(columnIndex -> new int[] { columnIndex }).toArray(int[][]::new);
    }
    final int[][] projectedFields =
            Stream.concat(
                            Stream.of(physicalProjections),
                            projectedMetadataColumns.stream()
                                    .map(NestedColumn::indexInOriginSchema)
                                    .map(columnIndex -> new int[] { columnIndex }))
                    .toArray(int[][]::new);
    int newIndex = physicalProjections.length;
    for (NestedColumn metaColumn : projectedMetadataColumns) {
        metaColumn.setIndexOfLeafInNewSchema(newIndex++);
    }
    if (supportsProjectionPushDown(source.tableSource())) {
        final RowType projectedPhysicalType = (RowType) Projection.of(physicalProjections).project(producedType);
        abilitySpecs.add(new ProjectPushDownSpec(physicalProjections, projectedPhysicalType));
    }
    final RowType newProducedType = (RowType) Projection.of(projectedFields).project(producedType);
    if (supportsMetadata(source.tableSource())) {
        final List<String> projectedMetadataKeys = projectedMetadataColumns.stream().map(NestedColumn::name).collect(Collectors.toList());
        abilitySpecs.add(new ReadingMetadataSpec(projectedMetadataKeys, newProducedType));
    }
    return newProducedType;
}
Also used : IntStream(java.util.stream.IntStream) NestedProjectionUtil(org.apache.flink.table.planner.plan.utils.NestedProjectionUtil) Arrays(java.util.Arrays) ShortcutUtils.unwrapTypeFactory(org.apache.flink.table.planner.utils.ShortcutUtils.unwrapTypeFactory) SourceAbilityContext(org.apache.flink.table.planner.plan.abilities.source.SourceAbilityContext) Column(org.apache.flink.table.catalog.Column) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) RexNodeExtractor(org.apache.flink.table.planner.plan.utils.RexNodeExtractor) FlinkTypeFactory(org.apache.flink.table.planner.calcite.FlinkTypeFactory) RowType(org.apache.flink.table.types.logical.RowType) SupportsProjectionPushDown(org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown) ArrayList(java.util.ArrayList) RexNode(org.apache.calcite.rex.RexNode) NestedSchema(org.apache.flink.table.planner.plan.utils.NestedSchema) Projection(org.apache.flink.table.connector.Projection) ProjectRemoveRule(org.apache.calcite.rel.rules.ProjectRemoveRule) DynamicSourceUtils.createProducedType(org.apache.flink.table.planner.connectors.DynamicSourceUtils.createProducedType) RelDataType(org.apache.calcite.rel.type.RelDataType) DynamicTableSource(org.apache.flink.table.connector.source.DynamicTableSource) TableConfig(org.apache.flink.table.api.TableConfig) LogicalProject(org.apache.calcite.rel.logical.LogicalProject) ProjectPushDownSpec(org.apache.flink.table.planner.plan.abilities.source.ProjectPushDownSpec) TableException(org.apache.flink.table.api.TableException) ShortcutUtils.unwrapContext(org.apache.flink.table.planner.utils.ShortcutUtils.unwrapContext) RelRule(org.apache.calcite.plan.RelRule) NestedColumn(org.apache.flink.table.planner.plan.utils.NestedColumn) Collectors(java.util.stream.Collectors) DynamicSourceUtils.createRequiredMetadataKeys(org.apache.flink.table.planner.connectors.DynamicSourceUtils.createRequiredMetadataKeys) SourceAbilitySpec(org.apache.flink.table.planner.plan.abilities.source.SourceAbilitySpec) TableSourceTable(org.apache.flink.table.planner.plan.schema.TableSourceTable) RelOptRuleCall(org.apache.calcite.plan.RelOptRuleCall) RexInputRef(org.apache.calcite.rex.RexInputRef) Objects(java.util.Objects) DynamicSourceUtils(org.apache.flink.table.planner.connectors.DynamicSourceUtils) RelOptRule(org.apache.calcite.plan.RelOptRule) List(java.util.List) Stream(java.util.stream.Stream) UniqueConstraint(org.apache.flink.table.catalog.UniqueConstraint) SupportsReadingMetadata(org.apache.flink.table.connector.source.abilities.SupportsReadingMetadata) ReadingMetadataSpec(org.apache.flink.table.planner.plan.abilities.source.ReadingMetadataSpec) Internal(org.apache.flink.annotation.Internal) Collections(java.util.Collections) LogicalTableScan(org.apache.calcite.rel.logical.LogicalTableScan)
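
The index arithmetic in performPushDown is easy to miss: metadata columns always come after the physical columns in the produced type, and they are appended again after the pruned physical projections in the new schema. A small self-contained illustration with hypothetical column numbers:

import java.util.Arrays;
import java.util.stream.Stream;

/** Illustrates how pruned physical fields and metadata fields are recombined. */
public class ProjectionIndexMath {

    public static void main(String[] args) {
        // Produced type: 4 physical columns (0..3) followed by 2 metadata columns (4, 5).
        // The query reads physical columns 1 and 3 and metadata column 5.
        int[][] physicalProjections = { { 1 }, { 3 } };
        int[] projectedMetadata = { 5 };

        int[][] projectedFields = Stream.concat(
                        Stream.of(physicalProjections),
                        Arrays.stream(projectedMetadata).mapToObj(i -> new int[] { i }))
                .toArray(int[][]::new);

        // -> [[1], [3], [5]]: metadata stays behind the physical fields, and its
        // new leaf index starts at physicalProjections.length (here 2).
        System.out.println(Arrays.deepToString(projectedFields));
    }
}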

Example 10 with TableSourceTable

Use of org.apache.flink.table.planner.plan.schema.TableSourceTable in project flink by apache.

From class PushFilterInCalcIntoTableSourceScanRule, method matches:

@Override
public boolean matches(RelOptRuleCall call) {
    if (!super.matches(call)) {
        return false;
    }
    FlinkLogicalCalc calc = call.rel(0);
    RexProgram originProgram = calc.getProgram();
    if (originProgram.getCondition() == null) {
        return false;
    }
    FlinkLogicalTableSourceScan scan = call.rel(1);
    TableSourceTable tableSourceTable = scan.getTable().unwrap(TableSourceTable.class);
    // We cannot push the filter down twice.
    return canPushdownFilter(tableSourceTable);
}
Also used : FlinkLogicalCalc(org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalCalc) RexProgram(org.apache.calcite.rex.RexProgram) FlinkLogicalTableSourceScan(org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalTableSourceScan) TableSourceTable(org.apache.flink.table.planner.plan.schema.TableSourceTable)
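
The canPushdownFilter check guards against applying the ability twice, since applying it mutates the copied source. For completeness, a minimal sketch of the connector side, assuming Flink's SupportsFilterPushDown ability and its nested Result class (the class name and the accept-everything policy are illustrative):

import java.util.ArrayList;
import java.util.List;

import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.connector.source.ScanTableSource;
import org.apache.flink.table.connector.source.abilities.SupportsFilterPushDown;
import org.apache.flink.table.expressions.ResolvedExpression;

/** Hypothetical source that accepts every pushed filter. */
public class MyFilterableSource implements ScanTableSource, SupportsFilterPushDown {

    private List<ResolvedExpression> pushedFilters = new ArrayList<>();

    @Override
    public Result applyFilters(List<ResolvedExpression> filters) {
        // Accepted filters are applied by the source itself; remaining filters
        // stay in the plan. Accepting everything lets the planner drop the
        // condition from the Calc entirely.
        pushedFilters = new ArrayList<>(filters);
        return Result.of(pushedFilters, new ArrayList<>());
    }

    @Override
    public ChangelogMode getChangelogMode() {
        return ChangelogMode.insertOnly();
    }

    @Override
    public ScanRuntimeProvider getScanRuntimeProvider(ScanContext context) {
        throw new UnsupportedOperationException("sketch only");
    }

    @Override
    public DynamicTableSource copy() {
        MyFilterableSource copy = new MyFilterableSource();
        copy.pushedFilters = new ArrayList<>(pushedFilters);
        return copy;
    }

    @Override
    public String asSummaryString() {
        return "MyFilterableSource";
    }
}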

Aggregations

TableSourceTable (org.apache.flink.table.planner.plan.schema.TableSourceTable) 25
DynamicTableSource (org.apache.flink.table.connector.source.DynamicTableSource) 13
LogicalTableScan (org.apache.calcite.rel.logical.LogicalTableScan) 9
SourceAbilitySpec (org.apache.flink.table.planner.plan.abilities.source.SourceAbilitySpec) 9
RexNode (org.apache.calcite.rex.RexNode) 8
ArrayList (java.util.ArrayList) 7
RelDataType (org.apache.calcite.rel.type.RelDataType) 7
SourceAbilityContext (org.apache.flink.table.planner.plan.abilities.source.SourceAbilityContext) 7
RowType (org.apache.flink.table.types.logical.RowType) 7
Arrays (java.util.Arrays) 5
List (java.util.List) 5
RelOptRule (org.apache.calcite.plan.RelOptRule) 5
RelOptRuleCall (org.apache.calcite.plan.RelOptRuleCall) 5
Filter (org.apache.calcite.rel.core.Filter) 5
TableException (org.apache.flink.table.api.TableException) 5
FlinkTypeFactory (org.apache.flink.table.planner.calcite.FlinkTypeFactory) 5
Collectors (java.util.stream.Collectors) 4
RexInputRef (org.apache.calcite.rex.RexInputRef) 4
ProjectPushDownSpec (org.apache.flink.table.planner.plan.abilities.source.ProjectPushDownSpec) 4
FlinkLogicalTableSourceScan (org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalTableSourceScan) 4