Use of org.apache.flink.table.planner.plan.schema.TableSourceTable in project flink by apache.
Class PushLocalAggIntoScanRuleBase, method pushLocalAggregateIntoScan.
protected void pushLocalAggregateIntoScan(
        RelOptRuleCall call,
        BatchPhysicalGroupAggregateBase localAgg,
        BatchPhysicalTableSourceScan oldScan,
        int[] calcRefFields) {
    RowType inputType = FlinkTypeFactory.toLogicalRowType(oldScan.getRowType());
    List<int[]> groupingSets =
            Collections.singletonList(
                    ArrayUtils.addAll(localAgg.grouping(), localAgg.auxGrouping()));
    List<AggregateCall> aggCallList = JavaScalaConversionUtil.toJava(localAgg.getAggCallList());

    // Map argument indexes in the aggregate to field indexes in the scan via the fields
    // referenced by the calc.
    if (calcRefFields != null) {
        groupingSets = translateGroupingArgIndex(groupingSets, calcRefFields);
        aggCallList = translateAggCallArgIndex(aggCallList, calcRefFields);
    }

    RowType producedType = FlinkTypeFactory.toLogicalRowType(localAgg.getRowType());
    TableSourceTable oldTableSourceTable = oldScan.tableSourceTable();
    DynamicTableSource newTableSource = oldScan.tableSource().copy();
    boolean isPushDownSuccess =
            AggregatePushDownSpec.apply(
                    inputType,
                    groupingSets,
                    aggCallList,
                    producedType,
                    newTableSource,
                    SourceAbilityContext.from(oldScan));

    if (!isPushDownSuccess) {
        // Aggregate push-down failed; return without changing any nodes.
        return;
    }

    // Create the new source table with the new spec and statistic.
    AggregatePushDownSpec aggregatePushDownSpec =
            new AggregatePushDownSpec(inputType, groupingSets, aggCallList, producedType);
    TableSourceTable newTableSourceTable =
            oldTableSourceTable
                    .copy(
                            newTableSource,
                            localAgg.getRowType(),
                            new SourceAbilitySpec[] {aggregatePushDownSpec})
                    .copy(FlinkStatistic.UNKNOWN());

    // Transform to the new nodes.
    BatchPhysicalTableSourceScan newScan = oldScan.copy(oldScan.getTraitSet(), newTableSourceTable);
    BatchPhysicalExchange oldExchange = call.rel(0);
    BatchPhysicalExchange newExchange =
            oldExchange.copy(oldExchange.getTraitSet(), newScan, oldExchange.getDistribution());
    call.transformTo(newExchange);
}
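The argument-index translation above is the subtle step: with a Calc between the local aggregate and the scan, the aggregate's grouping keys and call arguments index the Calc's output, not the scan's fields. A minimal sketch of that remapping, with a hypothetical helper name and assuming calcRefFields[i] holds the scan field index behind the Calc's i-th output:

// Illustrative sketch only; the rule's translateGroupingArgIndex/translateAggCallArgIndex
// perform this kind of remapping on the grouping sets and on each AggregateCall.
static int[] remapThroughCalc(int[] argIndexes, int[] calcRefFields) {
    int[] remapped = new int[argIndexes.length];
    for (int i = 0; i < argIndexes.length; i++) {
        // An argument at Calc output position p actually reads scan field calcRefFields[p].
        remapped[i] = calcRefFields[argIndexes[i]];
    }
    return remapped;
}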
Use of org.apache.flink.table.planner.plan.schema.TableSourceTable in project flink by apache.
Class PushProjectIntoTableSourceScanRule, method onMatch.
@Override
public void onMatch(RelOptRuleCall call) {
    final LogicalProject project = call.rel(0);
    final LogicalTableScan scan = call.rel(1);
    final TableSourceTable sourceTable = scan.getTable().unwrap(TableSourceTable.class);
    final boolean supportsNestedProjection = supportsNestedProjection(sourceTable.tableSource());

    final int[] refFields = RexNodeExtractor.extractRefInputFields(project.getProjects());
    if (!supportsNestedProjection && refFields.length == scan.getRowType().getFieldCount()) {
        // There is no top-level projection and nested projections aren't supported.
        return;
    }

    final FlinkTypeFactory typeFactory = unwrapTypeFactory(scan);
    final ResolvedSchema schema = sourceTable.contextResolvedTable().getResolvedSchema();
    final RowType producedType = createProducedType(schema, sourceTable.tableSource());
    final NestedSchema projectedSchema =
            NestedProjectionUtil.build(
                    getProjections(project, scan), typeFactory.buildRelNodeRowType(producedType));
    if (!supportsNestedProjection) {
        for (NestedColumn column : projectedSchema.columns().values()) {
            column.markLeaf();
        }
    }

    final List<SourceAbilitySpec> abilitySpecs = new ArrayList<>();
    final RowType newProducedType =
            performPushDown(sourceTable, projectedSchema, producedType, abilitySpecs);

    final DynamicTableSource newTableSource = sourceTable.tableSource().copy();
    final SourceAbilityContext context = SourceAbilityContext.from(scan);
    abilitySpecs.forEach(spec -> spec.apply(newTableSource, context));

    final RelDataType newRowType = typeFactory.buildRelNodeRowType(newProducedType);
    final TableSourceTable newSource =
            sourceTable.copy(
                    newTableSource, newRowType, abilitySpecs.toArray(new SourceAbilitySpec[0]));
    final LogicalTableScan newScan =
            new LogicalTableScan(scan.getCluster(), scan.getTraitSet(), scan.getHints(), newSource);
    final LogicalProject newProject =
            project.copy(
                    project.getTraitSet(),
                    newScan,
                    rewriteProjections(call, newSource, projectedSchema),
                    project.getRowType());

    if (ProjectRemoveRule.isTrivial(newProject)) {
        call.transformTo(newScan);
    } else {
        call.transformTo(newProject);
    }
}
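When the source does not support nested projection, calling markLeaf() on every referenced top-level column stops NestedProjectionUtil from descending into nested fields, so only whole top-level columns are pruned. A hypothetical illustration of the resulting index paths (example values, not actual planner output):

class NestedProjectionPaths {
    // Hypothetical example: the query reads top-level column 0 plus the nested field at
    // column 2, nested position 1 (e.g. user.name, with `user` at index 2).
    static final int[][] WITH_NESTED_PROJECTION = {{0}, {2, 1}}; // prune down to user.name
    static final int[][] TOP_LEVEL_LEAVES_ONLY = {{0}, {2}};     // keep whole `user` when nesting is unsupported
}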
Use of org.apache.flink.table.planner.plan.schema.TableSourceTable in project flink by apache.
Class PushProjectIntoTableSourceScanRule, method matches.
@Override
public boolean matches(RelOptRuleCall call) {
    final LogicalTableScan scan = call.rel(1);
    final TableSourceTable sourceTable = scan.getTable().unwrap(TableSourceTable.class);
    if (sourceTable == null) {
        return false;
    }

    final DynamicTableSource source = sourceTable.tableSource();

    // The source supports projection push-down.
    if (supportsProjectionPushDown(source)) {
        return Arrays.stream(sourceTable.abilitySpecs())
                .noneMatch(spec -> spec instanceof ProjectPushDownSpec);
    }

    // Projection push-down (for physical columns) is not supported; check whether the source
    // supports metadata projection only.
    if (supportsMetadata(source)) {
        if (Arrays.stream(sourceTable.abilitySpecs())
                .anyMatch(spec -> spec instanceof ReadingMetadataSpec)) {
            return false;
        }
        return ((SupportsReadingMetadata) source).supportsMetadataProjection();
    }

    return false;
}
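The two abilitySpecs() checks are what keep the rule from matching its own output: once a ProjectPushDownSpec or ReadingMetadataSpec has been recorded on the TableSourceTable, the scan is already projected. A condensed sketch of that guard as a hypothetical helper (not part of the rule itself):

// Hypothetical helper: a given push-down ability is applied at most once per TableSourceTable.
static boolean notAppliedYet(TableSourceTable table, Class<? extends SourceAbilitySpec> specClass) {
    return Arrays.stream(table.abilitySpecs()).noneMatch(specClass::isInstance);
}

With such a helper, the projection branch above amounts to notAppliedYet(sourceTable, ProjectPushDownSpec.class) and the metadata branch to notAppliedYet(sourceTable, ReadingMetadataSpec.class).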
Use of org.apache.flink.table.planner.plan.schema.TableSourceTable in project flink by apache.
Class PushProjectIntoTableSourceScanRule, method performPushDown.
private RowType performPushDown(
        TableSourceTable source,
        NestedSchema projectedSchema,
        RowType producedType,
        List<SourceAbilitySpec> abilitySpecs) {
    final int numPhysicalColumns;
    final List<NestedColumn> projectedMetadataColumns;
    if (supportsMetadata(source.tableSource())) {
        final List<String> declaredMetadataKeys =
                createRequiredMetadataKeys(
                        source.contextResolvedTable().getResolvedSchema(), source.tableSource());
        numPhysicalColumns = producedType.getFieldCount() - declaredMetadataKeys.size();
        projectedMetadataColumns =
                IntStream.range(0, declaredMetadataKeys.size())
                        .mapToObj(i -> producedType.getFieldNames().get(numPhysicalColumns + i))
                        .map(fieldName -> projectedSchema.columns().get(fieldName))
                        .filter(Objects::nonNull)
                        .collect(Collectors.toList());
    } else {
        numPhysicalColumns = producedType.getFieldCount();
        projectedMetadataColumns = Collections.emptyList();
    }

    final int[][] physicalProjections;
    if (supportsProjectionPushDown(source.tableSource())) {
        projectedMetadataColumns.forEach(
                metaColumn -> projectedSchema.columns().remove(metaColumn.name()));
        physicalProjections = NestedProjectionUtil.convertToIndexArray(projectedSchema);
        projectedMetadataColumns.forEach(
                metaColumn -> projectedSchema.columns().put(metaColumn.name(), metaColumn));
    } else {
        physicalProjections =
                IntStream.range(0, numPhysicalColumns)
                        .mapToObj(columnIndex -> new int[] {columnIndex})
                        .toArray(int[][]::new);
    }

    final int[][] projectedFields =
            Stream.concat(
                            Stream.of(physicalProjections),
                            projectedMetadataColumns.stream()
                                    .map(NestedColumn::indexInOriginSchema)
                                    .map(columnIndex -> new int[] {columnIndex}))
                    .toArray(int[][]::new);

    int newIndex = physicalProjections.length;
    for (NestedColumn metaColumn : projectedMetadataColumns) {
        metaColumn.setIndexOfLeafInNewSchema(newIndex++);
    }

    if (supportsProjectionPushDown(source.tableSource())) {
        final RowType projectedPhysicalType =
                (RowType) Projection.of(physicalProjections).project(producedType);
        abilitySpecs.add(new ProjectPushDownSpec(physicalProjections, projectedPhysicalType));
    }

    final RowType newProducedType = (RowType) Projection.of(projectedFields).project(producedType);
    if (supportsMetadata(source.tableSource())) {
        final List<String> projectedMetadataKeys =
                projectedMetadataColumns.stream()
                        .map(NestedColumn::name)
                        .collect(Collectors.toList());
        abilitySpecs.add(new ReadingMetadataSpec(projectedMetadataKeys, newProducedType));
    }

    return newProducedType;
}
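The concatenation into projectedFields relies on the produced type listing metadata columns after all physical columns, so every projected metadata column contributes one single-element path holding its index in the original schema. A worked illustration with a hypothetical column layout (example values only):

class MetadataProjectionExample {
    // Hypothetical produced type: ROW<id, amount, ts_meta>, where ts_meta is the only metadata
    // column and therefore sits behind the two physical columns (numPhysicalColumns = 2).
    static final int[][] PHYSICAL_PROJECTIONS = {{1}};   // the query only reads `amount`
    static final int[][] PROJECTED_FIELDS = {{1}, {2}};  // physical paths + ts_meta by its origin index
    // The new produced type is ROW<amount, ts_meta>; ts_meta's leaf index in the new schema is 1
    // (= PHYSICAL_PROJECTIONS.length), which is what setIndexOfLeafInNewSchema records.
}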
Use of org.apache.flink.table.planner.plan.schema.TableSourceTable in project flink by apache.
Class PushFilterInCalcIntoTableSourceScanRule, method matches.
@Override
public boolean matches(RelOptRuleCall call) {
    if (!super.matches(call)) {
        return false;
    }

    FlinkLogicalCalc calc = call.rel(0);
    RexProgram originProgram = calc.getProgram();
    if (originProgram.getCondition() == null) {
        return false;
    }

    FlinkLogicalTableSourceScan scan = call.rel(1);
    TableSourceTable tableSourceTable = scan.getTable().unwrap(TableSourceTable.class);
    // We cannot push the filter down twice.
    return canPushdownFilter(tableSourceTable);
}
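Note that RexProgram.getCondition() returns a RexLocalRef, i.e. a reference into the program's expression list rather than the predicate itself, so code that later needs the actual filter has to expand it. A minimal sketch with a hypothetical helper name, using the standard Calcite API:

// Expands the Calc program's condition (a RexLocalRef) into a plain RexNode predicate,
// or returns null if the program has no filter condition at all.
static RexNode extractCondition(RexProgram program) {
    return program.getCondition() == null
            ? null
            : program.expandLocalRef(program.getCondition());
}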