Use of org.apache.flink.table.planner.calcite.FlinkContext in project flink by apache.
The class PushPartitionIntoTableSourceScanRule, method onMatch.
@Override
public void onMatch(RelOptRuleCall call) {
    Filter filter = call.rel(0);
    LogicalTableScan scan = call.rel(1);
    TableSourceTable tableSourceTable = scan.getTable().unwrap(TableSourceTable.class);
    RelDataType inputFieldTypes = filter.getInput().getRowType();
    List<String> inputFieldNames = inputFieldTypes.getFieldNames();
    List<String> partitionFieldNames =
            tableSourceTable
                    .contextResolvedTable()
                    .<ResolvedCatalogTable>getResolvedTable()
                    .getPartitionKeys();
    // extract partition predicates
    RelBuilder relBuilder = call.builder();
    RexBuilder rexBuilder = relBuilder.getRexBuilder();
    Tuple2<Seq<RexNode>, Seq<RexNode>> allPredicates =
            RexNodeExtractor.extractPartitionPredicateList(
                    filter.getCondition(),
                    FlinkRelOptUtil.getMaxCnfNodeCount(scan),
                    inputFieldNames.toArray(new String[0]),
                    rexBuilder,
                    partitionFieldNames.toArray(new String[0]));
    RexNode partitionPredicate =
            RexUtil.composeConjunction(rexBuilder, JavaConversions.seqAsJavaList(allPredicates._1));
    if (partitionPredicate.isAlwaysTrue()) {
        return;
    }
    // build pruner
    LogicalType[] partitionFieldTypes =
            partitionFieldNames.stream()
                    .map(name -> {
                        int index = inputFieldNames.indexOf(name);
                        if (index < 0) {
                            throw new TableException(
                                    String.format(
                                            "Partitioned key '%s' isn't found in input columns. "
                                                    + "Validator should have checked that.",
                                            name));
                        }
                        return inputFieldTypes.getFieldList().get(index).getType();
                    })
                    .map(FlinkTypeFactory::toLogicalType)
                    .toArray(LogicalType[]::new);
    RexNode finalPartitionPredicate =
            adjustPartitionPredicate(inputFieldNames, partitionFieldNames, partitionPredicate);
    FlinkContext context = ShortcutUtils.unwrapContext(scan);
    Function<List<Map<String, String>>, List<Map<String, String>>> defaultPruner =
            partitions -> PartitionPruner.prunePartitions(
                    context.getTableConfig(),
                    partitionFieldNames.toArray(new String[0]),
                    partitionFieldTypes,
                    partitions,
                    finalPartitionPredicate);
    // prune partitions
    List<Map<String, String>> remainingPartitions =
            readPartitionsAndPrune(
                    rexBuilder, context, tableSourceTable, defaultPruner, allPredicates._1(), inputFieldNames);
    // apply push down
    DynamicTableSource dynamicTableSource = tableSourceTable.tableSource().copy();
    PartitionPushDownSpec partitionPushDownSpec = new PartitionPushDownSpec(remainingPartitions);
    partitionPushDownSpec.apply(dynamicTableSource, SourceAbilityContext.from(scan));
    // build new statistic
    TableStats newTableStat = null;
    if (tableSourceTable.contextResolvedTable().isPermanent()) {
        ObjectIdentifier identifier = tableSourceTable.contextResolvedTable().getIdentifier();
        ObjectPath tablePath = identifier.toObjectPath();
        Catalog catalog = tableSourceTable.contextResolvedTable().getCatalog().get();
        for (Map<String, String> partition : remainingPartitions) {
            Optional<TableStats> partitionStats = getPartitionStats(catalog, tablePath, partition);
            if (!partitionStats.isPresent()) {
                // clear all information before
                newTableStat = null;
                break;
            } else {
                newTableStat =
                        newTableStat == null
                                ? partitionStats.get()
                                : newTableStat.merge(partitionStats.get());
            }
        }
    }
    FlinkStatistic newStatistic =
            FlinkStatistic.builder()
                    .statistic(tableSourceTable.getStatistic())
                    .tableStats(newTableStat)
                    .build();
    TableSourceTable newTableSourceTable =
            tableSourceTable.copy(
                    dynamicTableSource, newStatistic, new SourceAbilitySpec[] {partitionPushDownSpec});
    LogicalTableScan newScan =
            LogicalTableScan.create(scan.getCluster(), newTableSourceTable, scan.getHints());
    // transform to new node
    RexNode nonPartitionPredicate =
            RexUtil.composeConjunction(rexBuilder, JavaConversions.seqAsJavaList(allPredicates._2()));
    if (nonPartitionPredicate.isAlwaysTrue()) {
        call.transformTo(newScan);
    } else {
        Filter newFilter = filter.copy(filter.getTraitSet(), newScan, nonPartitionPredicate);
        call.transformTo(newFilter);
    }
}
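The part of this rule that involves FlinkContext is the ShortcutUtils unwrap in the middle of the method. A minimal sketch of that access pattern, assuming the Flink planner classes used above are on the classpath (the helper class and method names below are made up for illustration):

import org.apache.calcite.rel.core.TableScan;
import org.apache.flink.table.api.TableConfig;
import org.apache.flink.table.planner.calcite.FlinkContext;
import org.apache.flink.table.planner.utils.ShortcutUtils;

// Hypothetical helper: inside planner rules the FlinkContext is unwrapped from a RelNode
// (here a TableScan) via ShortcutUtils rather than being passed in as a parameter.
final class FlinkContextAccess {

    private FlinkContextAccess() {}

    static TableConfig tableConfigOf(TableScan scan) {
        // unwrapContext resolves the FlinkContext attached to the scan's planner cluster
        FlinkContext context = ShortcutUtils.unwrapContext(scan);
        return context.getTableConfig();
    }
}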
Use of org.apache.flink.table.planner.calcite.FlinkContext in project flink by apache.
The class BridgingSqlFunction, method of.
/**
 * Creates an instance of a scalar or table function during translation.
 */
public static BridgingSqlFunction of(
        RelOptCluster cluster, ContextResolvedFunction resolvedFunction) {
    final FlinkContext context = ShortcutUtils.unwrapContext(cluster);
    final FlinkTypeFactory typeFactory = ShortcutUtils.unwrapTypeFactory(cluster);
    return of(context, typeFactory, resolvedFunction);
}
Use of org.apache.flink.table.planner.calcite.FlinkContext in project flink by apache.
The class LookupKeySerdeTest, method testLookupKey.
@Test
public void testLookupKey() throws IOException {
    TableConfig tableConfig = TableConfig.getDefault();
    ModuleManager moduleManager = new ModuleManager();
    CatalogManager catalogManager = CatalogManager.newBuilder()
            .classLoader(Thread.currentThread().getContextClassLoader())
            .config(tableConfig.getConfiguration())
            .defaultCatalog("default_catalog", new GenericInMemoryCatalog("default_db"))
            .build();
    FlinkContext flinkContext = new FlinkContextImpl(
            false, tableConfig, moduleManager,
            new FunctionCatalog(tableConfig, catalogManager, moduleManager),
            catalogManager, null);
    SerdeContext serdeCtx = new SerdeContext(
            null, flinkContext, Thread.currentThread().getContextClassLoader(),
            FlinkTypeFactory.INSTANCE(), FlinkSqlOperatorTable.instance());
    ObjectReader objectReader = JsonSerdeUtil.createObjectReader(serdeCtx);
    ObjectWriter objectWriter = JsonSerdeUtil.createObjectWriter(serdeCtx);
    LookupJoinUtil.LookupKey[] lookupKeys = new LookupJoinUtil.LookupKey[] {
        new LookupJoinUtil.ConstantLookupKey(
                new BigIntType(), new RexBuilder(FlinkTypeFactory.INSTANCE()).makeLiteral("a")),
        new LookupJoinUtil.FieldRefLookupKey(3)
    };
    for (LookupJoinUtil.LookupKey lookupKey : lookupKeys) {
        LookupJoinUtil.LookupKey result = objectReader.readValue(
                objectWriter.writeValueAsString(lookupKey), LookupJoinUtil.LookupKey.class);
        assertEquals(lookupKey, result);
    }
}
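The serialize-then-deserialize step in the loop can be factored into a small generic helper. A sketch using the same Jackson ObjectWriter and ObjectReader pair as the test (the roundTrip helper is a made-up name, not part of Flink):

// Hypothetical helper: write the value with the SerdeContext-aware writer, read it back,
// and let the caller assert equality, mirroring the loop in the test above.
static <T> T roundTrip(ObjectWriter objectWriter, ObjectReader objectReader, T value, Class<T> clazz)
        throws IOException {
    return objectReader.readValue(objectWriter.writeValueAsString(value), clazz);
}

A call such as assertEquals(lookupKey, roundTrip(objectWriter, objectReader, lookupKey, LookupJoinUtil.LookupKey.class)) would then replace the body of the loop.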
Use of org.apache.flink.table.planner.calcite.FlinkContext in project flink by apache.
The class CatalogSourceTable, method toRel.
@Override
public RelNode toRel(ToRelContext toRelContext) {
    final RelOptCluster cluster = toRelContext.getCluster();
    final List<RelHint> hints = toRelContext.getTableHints();
    final FlinkContext context = ShortcutUtils.unwrapContext(cluster);
    final FlinkRelBuilder relBuilder = FlinkRelBuilder.of(cluster, relOptSchema);
    // finalize catalog table with option hints
    final Map<String, String> hintedOptions = FlinkHints.getHintedOptions(hints);
    final ContextResolvedTable catalogTable = computeContextResolvedTable(context, hintedOptions);
    // create table source
    final DynamicTableSource tableSource =
            createDynamicTableSource(context, catalogTable.getResolvedTable());
    // prepare table source and convert to RelNode
    return DynamicSourceUtils.convertSourceToRel(
            !schemaTable.isStreamingMode(),
            context.getTableConfig().getConfiguration(),
            relBuilder,
            schemaTable.getContextResolvedTable(),
            schemaTable.getStatistic(),
            hints,
            tableSource);
}
Use of org.apache.flink.table.planner.calcite.FlinkContext in project flink by apache.
The class PushFilterIntoSourceScanRuleBase, method extractPredicates.
protected Tuple2<RexNode[], RexNode[]> extractPredicates(
        String[] inputNames, RexNode filterExpression, TableScan scan, RexBuilder rexBuilder) {
    FlinkContext context = ShortcutUtils.unwrapContext(scan);
    int maxCnfNodeCount = FlinkRelOptUtil.getMaxCnfNodeCount(scan);
    RexNodeToExpressionConverter converter = new RexNodeToExpressionConverter(
            rexBuilder, inputNames, context.getFunctionCatalog(), context.getCatalogManager(),
            TimeZone.getTimeZone(context.getTableConfig().getLocalTimeZone()));
    return RexNodeExtractor.extractConjunctiveConditions(
            filterExpression, maxCnfNodeCount, rexBuilder, converter);
}
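For orientation, a sketch of how a concrete push-down rule might feed this helper from a matched Filter over a TableScan. The surrounding onMatch body is hypothetical; the accessor calls mirror the PushPartitionIntoTableSourceScanRule example at the top of this page:

// Hypothetical call site inside a subclass's onMatch(RelOptRuleCall call),
// assuming the rule matched a Filter (rel 0) on top of a TableScan (rel 1).
Filter filter = call.rel(0);
TableScan scan = call.rel(1);
String[] inputNames = filter.getInput().getRowType().getFieldNames().toArray(new String[0]);
RexBuilder rexBuilder = call.builder().getRexBuilder();
Tuple2<RexNode[], RexNode[]> extracted =
        extractPredicates(inputNames, filter.getCondition(), scan, rexBuilder);
// The concrete rule then decides which of the two predicate groups can be pushed into the source.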