use of org.apache.flink.table.planner.plan.schema.TableSourceTable in project flink by apache.
the class TemporalTableSourceSpec method getTemporalTable.
@JsonIgnore
public RelOptTable getTemporalTable(FlinkContext flinkContext) {
    if (null != temporalTable) {
        return temporalTable;
    }
    if (null != tableSourceSpec && null != outputType) {
        LookupTableSource lookupTableSource =
                tableSourceSpec.getLookupTableSource(flinkContext);
        SourceAbilitySpec[] sourceAbilitySpecs = null;
        if (null != tableSourceSpec.getSourceAbilities()) {
            sourceAbilitySpecs =
                    tableSourceSpec.getSourceAbilities().toArray(new SourceAbilitySpec[0]);
        }
        return new TableSourceTable(
                null,
                outputType,
                FlinkStatistic.UNKNOWN(),
                lookupTableSource,
                true,
                tableSourceSpec.getContextResolvedTable(),
                flinkContext,
                sourceAbilitySpecs);
    }
    throw new TableException("Can not obtain temporalTable correctly!");
}
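The method is a cache-or-rebuild accessor: the transient temporalTable is returned when present; otherwise the table is reconstructed from the serialized tableSourceSpec and outputType, and a TableException is thrown when neither is available. A minimal standalone sketch of the same pattern, with hypothetical names and no Flink dependency:

import java.util.function.Function;

/** Hypothetical stand-in for the cache-or-rebuild pattern above. */
final class LazySpec<C, T> {
    private final T cached;               // plays the role of temporalTable
    private final Function<C, T> builder; // plays the role of the TableSourceTable reconstruction

    LazySpec(T cached, Function<C, T> builder) {
        this.cached = cached;
        this.builder = builder;
    }

    T resolve(C context) {
        if (cached != null) {
            return cached; // fast path: already materialized
        }
        if (builder != null) {
            return builder.apply(context); // rebuild from the serialized spec
        }
        throw new IllegalStateException("Can not obtain temporalTable correctly!");
    }
}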
use of org.apache.flink.table.planner.plan.schema.TableSourceTable in project flink by apache.
the class TemporalTableSourceSpecSerdeTest method testTemporalTableSourceSpecSerde.
public static Stream<TemporalTableSourceSpec> testTemporalTableSourceSpecSerde() {
    Map<String, String> options1 = new HashMap<>();
    options1.put("connector", "filesystem");
    options1.put("format", "testcsv");
    options1.put("path", "/tmp");
    final ResolvedSchema resolvedSchema1 = new ResolvedSchema(
            Collections.singletonList(Column.physical("a", DataTypes.BIGINT())),
            Collections.emptyList(), null);
    final CatalogTable catalogTable1 = CatalogTable.of(
            Schema.newBuilder().fromResolvedSchema(resolvedSchema1).build(),
            null, Collections.emptyList(), options1);
    ResolvedCatalogTable resolvedCatalogTable =
            new ResolvedCatalogTable(catalogTable1, resolvedSchema1);
    RelDataType relDataType1 = FACTORY.createSqlType(SqlTypeName.BIGINT);
    LookupTableSource lookupTableSource = new TestValuesTableFactory.MockedLookupTableSource();
    TableSourceTable tableSourceTable1 = new TableSourceTable(
            null, relDataType1, FlinkStatistic.UNKNOWN(), lookupTableSource, true,
            ContextResolvedTable.temporary(
                    ObjectIdentifier.of("default_catalog", "default_db", "MyTable"),
                    resolvedCatalogTable),
            FLINK_CONTEXT, new SourceAbilitySpec[] { new LimitPushDownSpec(100) });
    TemporalTableSourceSpec temporalTableSourceSpec1 =
            new TemporalTableSourceSpec(tableSourceTable1);
    return Stream.of(temporalTableSourceSpec1);
}
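Each spec this supplier yields is meant to be round-tripped through the planner's JSON serde. A plausible consumer, sketched under the assumption of a JUnit 5 parameterized test with a Jackson ObjectMapper wired for the planner (the mapper setup and the compared getter are assumptions, not the actual test body):

@ParameterizedTest
@MethodSource("testTemporalTableSourceSpecSerde")
void testSerde(TemporalTableSourceSpec spec) throws Exception {
    // Assumed: objectMapper is configured with the planner's serde module.
    String json = objectMapper.writeValueAsString(spec);
    TemporalTableSourceSpec restored =
            objectMapper.readValue(json, TemporalTableSourceSpec.class);
    // Hypothetical assertion: the round trip preserves the serialized output type.
    assertThat(restored.getOutputType()).isEqualTo(spec.getOutputType());
}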
use of org.apache.flink.table.planner.plan.schema.TableSourceTable in project flink by apache.
the class PushPartitionIntoTableSourceScanRule method onMatch.
@Override
public void onMatch(RelOptRuleCall call) {
    Filter filter = call.rel(0);
    LogicalTableScan scan = call.rel(1);
    TableSourceTable tableSourceTable = scan.getTable().unwrap(TableSourceTable.class);
    RelDataType inputFieldTypes = filter.getInput().getRowType();
    List<String> inputFieldNames = inputFieldTypes.getFieldNames();
    List<String> partitionFieldNames =
            tableSourceTable
                    .contextResolvedTable()
                    .<ResolvedCatalogTable>getResolvedTable()
                    .getPartitionKeys();
    // extract partition predicates
    RelBuilder relBuilder = call.builder();
    RexBuilder rexBuilder = relBuilder.getRexBuilder();
    Tuple2<Seq<RexNode>, Seq<RexNode>> allPredicates =
            RexNodeExtractor.extractPartitionPredicateList(
                    filter.getCondition(),
                    FlinkRelOptUtil.getMaxCnfNodeCount(scan),
                    inputFieldNames.toArray(new String[0]),
                    rexBuilder,
                    partitionFieldNames.toArray(new String[0]));
    RexNode partitionPredicate =
            RexUtil.composeConjunction(
                    rexBuilder, JavaConversions.seqAsJavaList(allPredicates._1));
    if (partitionPredicate.isAlwaysTrue()) {
        return;
    }
    // build pruner
    LogicalType[] partitionFieldTypes =
            partitionFieldNames.stream()
                    .map(name -> {
                        int index = inputFieldNames.indexOf(name);
                        if (index < 0) {
                            throw new TableException(
                                    String.format(
                                            "Partitioned key '%s' isn't found in input columns. "
                                                    + "Validator should have checked that.",
                                            name));
                        }
                        return inputFieldTypes.getFieldList().get(index).getType();
                    })
                    .map(FlinkTypeFactory::toLogicalType)
                    .toArray(LogicalType[]::new);
    RexNode finalPartitionPredicate =
            adjustPartitionPredicate(inputFieldNames, partitionFieldNames, partitionPredicate);
    FlinkContext context = ShortcutUtils.unwrapContext(scan);
    Function<List<Map<String, String>>, List<Map<String, String>>> defaultPruner =
            partitions ->
                    PartitionPruner.prunePartitions(
                            context.getTableConfig(),
                            partitionFieldNames.toArray(new String[0]),
                            partitionFieldTypes,
                            partitions,
                            finalPartitionPredicate);
    // prune partitions
    List<Map<String, String>> remainingPartitions =
            readPartitionsAndPrune(
                    rexBuilder,
                    context,
                    tableSourceTable,
                    defaultPruner,
                    allPredicates._1(),
                    inputFieldNames);
    // apply push down
    DynamicTableSource dynamicTableSource = tableSourceTable.tableSource().copy();
    PartitionPushDownSpec partitionPushDownSpec = new PartitionPushDownSpec(remainingPartitions);
    partitionPushDownSpec.apply(dynamicTableSource, SourceAbilityContext.from(scan));
    // build new statistic
    TableStats newTableStat = null;
    if (tableSourceTable.contextResolvedTable().isPermanent()) {
        ObjectIdentifier identifier = tableSourceTable.contextResolvedTable().getIdentifier();
        ObjectPath tablePath = identifier.toObjectPath();
        Catalog catalog = tableSourceTable.contextResolvedTable().getCatalog().get();
        for (Map<String, String> partition : remainingPartitions) {
            Optional<TableStats> partitionStats = getPartitionStats(catalog, tablePath, partition);
            if (!partitionStats.isPresent()) {
                // a partition without stats invalidates the merged result: clear and stop
                newTableStat = null;
                break;
            } else {
                newTableStat = newTableStat == null
                        ? partitionStats.get()
                        : newTableStat.merge(partitionStats.get());
            }
        }
    }
    FlinkStatistic newStatistic =
            FlinkStatistic.builder()
                    .statistic(tableSourceTable.getStatistic())
                    .tableStats(newTableStat)
                    .build();
    TableSourceTable newTableSourceTable =
            tableSourceTable.copy(
                    dynamicTableSource,
                    newStatistic,
                    new SourceAbilitySpec[] { partitionPushDownSpec });
    LogicalTableScan newScan =
            LogicalTableScan.create(scan.getCluster(), newTableSourceTable, scan.getHints());
    // transform to new node
    RexNode nonPartitionPredicate =
            RexUtil.composeConjunction(
                    rexBuilder, JavaConversions.seqAsJavaList(allPredicates._2()));
    if (nonPartitionPredicate.isAlwaysTrue()) {
        call.transformTo(newScan);
    } else {
        Filter newFilter = filter.copy(filter.getTraitSet(), newScan, nonPartitionPredicate);
        call.transformTo(newFilter);
    }
}
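Stripped of the planner machinery, the pruning step evaluates the partition predicate against each partition's key/value map and keeps the matches. A minimal standalone sketch of that idea (plain Java with hypothetical names; the real PartitionPruner evaluates generated code over RexNode predicates rather than a java.util.function.Predicate):

import java.util.List;
import java.util.Map;
import java.util.function.Predicate;
import java.util.stream.Collectors;

final class SimplePruner {
    /** Keep only the partitions whose key/value map satisfies the predicate. */
    static List<Map<String, String>> prune(
            List<Map<String, String>> partitions,
            Predicate<Map<String, String>> partitionPredicate) {
        return partitions.stream().filter(partitionPredicate).collect(Collectors.toList());
    }
}

// Usage with partitions like {dt=2024-01-01}:
// List<Map<String, String>> remaining =
//         SimplePruner.prune(allPartitions, p -> p.get("dt").compareTo("2024-01-01") >= 0);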
use of org.apache.flink.table.planner.plan.schema.TableSourceTable in project flink by apache.
the class PushPartitionIntoTableSourceScanRule method matches.
@Override
public boolean matches(RelOptRuleCall call) {
    Filter filter = call.rel(0);
    if (filter.getCondition() == null) {
        return false;
    }
    TableSourceTable tableSourceTable = call.rel(1).getTable().unwrap(TableSourceTable.class);
    if (tableSourceTable == null) {
        return false;
    }
    DynamicTableSource dynamicTableSource = tableSourceTable.tableSource();
    if (!(dynamicTableSource instanceof SupportsPartitionPushDown)) {
        return false;
    }
    CatalogTable catalogTable = tableSourceTable.contextResolvedTable().getTable();
    if (!catalogTable.isPartitioned() || catalogTable.getPartitionKeys().isEmpty()) {
        return false;
    }
    return Arrays.stream(tableSourceTable.abilitySpecs())
            .noneMatch(spec -> spec instanceof PartitionPushDownSpec);
}
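The instanceof guard only passes for sources that opt in via SupportsPartitionPushDown. A hedged sketch of that opt-in on the connector side (the class and field are hypothetical; listPartitions and applyPartitions are the methods of the SupportsPartitionPushDown interface, and a real source would also implement ScanTableSource):

import org.apache.flink.table.connector.source.abilities.SupportsPartitionPushDown;

import java.util.List;
import java.util.Map;
import java.util.Optional;

/** Hypothetical source fragment showing the partition push-down contract. */
public class MyPartitionedSource implements SupportsPartitionPushDown {

    private List<Map<String, String>> remainingPartitions;

    @Override
    public Optional<List<Map<String, String>>> listPartitions() {
        // Returning Optional.empty() asks the planner to list partitions via the catalog.
        return Optional.empty();
    }

    @Override
    public void applyPartitions(List<Map<String, String>> remainingPartitions) {
        // The planner hands over the pruned partition list; only these should be read.
        this.remainingPartitions = remainingPartitions;
    }
}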
use of org.apache.flink.table.planner.plan.schema.TableSourceTable in project flink by apache.
the class PushWatermarkIntoTableSourceScanRuleBase method getNewScan.
/**
 * Uses the given watermark expression to generate the {@link WatermarkGeneratorSupplier},
 * pushes the resulting {@link WatermarkStrategy} into the scan, and builds a new scan.
 * When the {@link FlinkLogicalWatermarkAssigner} is the direct parent of the {@link
 * FlinkLogicalTableSourceScan}, the rowtime type is rewritten on the scan so that the
 * plan's types stay consistent. In all other cases the scan keeps its previous data type
 * and the type rewrite is deferred to the projection rewrite.
 *
 * <p>NOTE: the row type of the scan is not always the same as the watermark assigner's,
 * because pushing the watermark assigner into the scan does not add the rowtime column
 * to the row. A query may define computed columns on top of the rowtime column; changing
 * the rowtime type (to a time attribute) would then also change the computed columns'
 * types. Therefore, if the watermark assigner is not the parent of the scan, the scan
 * keeps its previous type and the projection handles the conversion.
 */
protected FlinkLogicalTableSourceScan getNewScan(
        FlinkLogicalWatermarkAssigner watermarkAssigner,
        RexNode watermarkExpr,
        FlinkLogicalTableSourceScan scan,
        TableConfig tableConfig,
        boolean useWatermarkAssignerRowType) {
    final TableSourceTable tableSourceTable = scan.getTable().unwrap(TableSourceTable.class);
    final DynamicTableSource newDynamicTableSource = tableSourceTable.tableSource().copy();
    final boolean isSourceWatermark =
            newDynamicTableSource instanceof SupportsSourceWatermark
                    && hasSourceWatermarkDeclaration(watermarkExpr);
    final RelDataType newType;
    if (useWatermarkAssignerRowType) {
        // the projection is trivial: set the rowtime type directly on the scan
        newType = watermarkAssigner.getRowType();
    } else {
        // the projection adds/deletes columns: the rowtime column type is set there
        newType = scan.getRowType();
    }
    final RowType producedType = (RowType) FlinkTypeFactory.toLogicalType(newType);
    final SourceAbilityContext abilityContext = SourceAbilityContext.from(scan);
    final SourceAbilitySpec abilitySpec;
    if (isSourceWatermark) {
        final SourceWatermarkSpec sourceWatermarkSpec =
                new SourceWatermarkSpec(true, producedType);
        sourceWatermarkSpec.apply(newDynamicTableSource, abilityContext);
        abilitySpec = sourceWatermarkSpec;
    } else {
        final Duration idleTimeout =
                tableConfig
                        .getConfiguration()
                        .get(ExecutionConfigOptions.TABLE_EXEC_SOURCE_IDLE_TIMEOUT);
        final long idleTimeoutMillis;
        if (!idleTimeout.isZero() && !idleTimeout.isNegative()) {
            idleTimeoutMillis = idleTimeout.toMillis();
        } else {
            idleTimeoutMillis = -1L;
        }
        final WatermarkPushDownSpec watermarkPushDownSpec =
                new WatermarkPushDownSpec(watermarkExpr, idleTimeoutMillis, producedType);
        watermarkPushDownSpec.apply(newDynamicTableSource, abilityContext);
        abilitySpec = watermarkPushDownSpec;
    }
    TableSourceTable newTableSourceTable =
            tableSourceTable.copy(
                    newDynamicTableSource, newType, new SourceAbilitySpec[] { abilitySpec });
    return FlinkLogicalTableSourceScan.create(scan.getCluster(), scan.getHints(), newTableSourceTable);
}
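The idle-timeout branch normalizes the configured Duration (option table.exec.source.idle-timeout) into the convention WatermarkPushDownSpec expects: a positive millisecond value when idleness detection is enabled, -1 when it is off. The same normalization in isolation, as a small runnable sketch with a hypothetical class name:

import java.time.Duration;

final class IdleTimeoutNormalizer {
    /** Mirrors the branch above: positive millis enable idleness marking, -1 disables it. */
    static long toMillisOrDisabled(Duration idleTimeout) {
        if (!idleTimeout.isZero() && !idleTimeout.isNegative()) {
            return idleTimeout.toMillis();
        }
        return -1L;
    }

    public static void main(String[] args) {
        System.out.println(toMillisOrDisabled(Duration.ofSeconds(30))); // 30000
        System.out.println(toMillisOrDisabled(Duration.ZERO));          // -1
    }
}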