Use of org.apache.flink.table.api.TableException in project flink by apache.
The class StatementSetImpl, method addInsertSql.
@Override
public StatementSet addInsertSql(String statement) {
    List<Operation> operations = tableEnvironment.getParser().parse(statement);
    if (operations.size() != 1) {
        throw new TableException("Only single statement is supported.");
    }
    Operation operation = operations.get(0);
    if (operation instanceof ModifyOperation) {
        this.operations.add((ModifyOperation) operation);
    } else {
        throw new TableException("Only insert statement is supported now.");
    }
    return this;
}
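For orientation, a minimal caller-side sketch of how these checks surface. This is an illustrative example, not code from the Flink repository; the table names and schemas are assumptions, backed by the built-in datagen and blackhole connectors so the snippet stands alone:

import org.apache.flink.table.api.*;

public class StatementSetExample {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        // Illustrative source and sink tables (names and schemas are assumptions).
        tEnv.executeSql("CREATE TABLE src (v INT) WITH ('connector' = 'datagen')");
        tEnv.executeSql("CREATE TABLE snk (v INT) WITH ('connector' = 'blackhole')");
        StatementSet set = tEnv.createStatementSet();
        // Parses to a single ModifyOperation, so it passes both checks above.
        set.addInsertSql("INSERT INTO snk SELECT v FROM src");
        try {
            // A plain SELECT is not a ModifyOperation and is rejected.
            set.addInsertSql("SELECT v FROM src");
        } catch (TableException e) {
            System.out.println(e.getMessage()); // "Only insert statement is supported now."
        }
    }
}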
Use of org.apache.flink.table.api.TableException in project flink by apache.
The class PushPartitionIntoTableSourceScanRule, method onMatch.
@Override
public void onMatch(RelOptRuleCall call) {
    Filter filter = call.rel(0);
    LogicalTableScan scan = call.rel(1);
    TableSourceTable tableSourceTable = scan.getTable().unwrap(TableSourceTable.class);
    RelDataType inputFieldTypes = filter.getInput().getRowType();
    List<String> inputFieldNames = inputFieldTypes.getFieldNames();
    List<String> partitionFieldNames =
            tableSourceTable
                    .contextResolvedTable()
                    .<ResolvedCatalogTable>getResolvedTable()
                    .getPartitionKeys();
    // extract partition predicates
    RelBuilder relBuilder = call.builder();
    RexBuilder rexBuilder = relBuilder.getRexBuilder();
    Tuple2<Seq<RexNode>, Seq<RexNode>> allPredicates =
            RexNodeExtractor.extractPartitionPredicateList(
                    filter.getCondition(),
                    FlinkRelOptUtil.getMaxCnfNodeCount(scan),
                    inputFieldNames.toArray(new String[0]),
                    rexBuilder,
                    partitionFieldNames.toArray(new String[0]));
    RexNode partitionPredicate =
            RexUtil.composeConjunction(rexBuilder, JavaConversions.seqAsJavaList(allPredicates._1));
    if (partitionPredicate.isAlwaysTrue()) {
        return;
    }
    // build pruner
    LogicalType[] partitionFieldTypes =
            partitionFieldNames.stream()
                    .map(name -> {
                        int index = inputFieldNames.indexOf(name);
                        if (index < 0) {
                            throw new TableException(
                                    String.format(
                                            "Partitioned key '%s' isn't found in input columns. "
                                                    + "Validator should have checked that.",
                                            name));
                        }
                        return inputFieldTypes.getFieldList().get(index).getType();
                    })
                    .map(FlinkTypeFactory::toLogicalType)
                    .toArray(LogicalType[]::new);
    RexNode finalPartitionPredicate =
            adjustPartitionPredicate(inputFieldNames, partitionFieldNames, partitionPredicate);
    FlinkContext context = ShortcutUtils.unwrapContext(scan);
    Function<List<Map<String, String>>, List<Map<String, String>>> defaultPruner =
            partitions ->
                    PartitionPruner.prunePartitions(
                            context.getTableConfig(),
                            partitionFieldNames.toArray(new String[0]),
                            partitionFieldTypes,
                            partitions,
                            finalPartitionPredicate);
    // prune partitions
    List<Map<String, String>> remainingPartitions =
            readPartitionsAndPrune(
                    rexBuilder,
                    context,
                    tableSourceTable,
                    defaultPruner,
                    allPredicates._1(),
                    inputFieldNames);
    // apply push down
    DynamicTableSource dynamicTableSource = tableSourceTable.tableSource().copy();
    PartitionPushDownSpec partitionPushDownSpec = new PartitionPushDownSpec(remainingPartitions);
    partitionPushDownSpec.apply(dynamicTableSource, SourceAbilityContext.from(scan));
    // build new statistic
    TableStats newTableStat = null;
    if (tableSourceTable.contextResolvedTable().isPermanent()) {
        ObjectIdentifier identifier = tableSourceTable.contextResolvedTable().getIdentifier();
        ObjectPath tablePath = identifier.toObjectPath();
        Catalog catalog = tableSourceTable.contextResolvedTable().getCatalog().get();
        for (Map<String, String> partition : remainingPartitions) {
            Optional<TableStats> partitionStats = getPartitionStats(catalog, tablePath, partition);
            if (!partitionStats.isPresent()) {
                // clear all information before
                newTableStat = null;
                break;
            } else {
                newTableStat =
                        newTableStat == null
                                ? partitionStats.get()
                                : newTableStat.merge(partitionStats.get());
            }
        }
    }
    FlinkStatistic newStatistic =
            FlinkStatistic.builder()
                    .statistic(tableSourceTable.getStatistic())
                    .tableStats(newTableStat)
                    .build();
    TableSourceTable newTableSourceTable =
            tableSourceTable.copy(
                    dynamicTableSource, newStatistic, new SourceAbilitySpec[] {partitionPushDownSpec});
    LogicalTableScan newScan =
            LogicalTableScan.create(scan.getCluster(), newTableSourceTable, scan.getHints());
    // transform to new node
    RexNode nonPartitionPredicate =
            RexUtil.composeConjunction(rexBuilder, JavaConversions.seqAsJavaList(allPredicates._2()));
    if (nonPartitionPredicate.isAlwaysTrue()) {
        call.transformTo(newScan);
    } else {
        Filter newFilter = filter.copy(filter.getTraitSet(), newScan, nonPartitionPredicate);
        call.transformTo(newFilter);
    }
}
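On the connector side, this rule talks to the source through the SupportsPartitionPushDown ability: listPartitions() feeds readPartitionsAndPrune (below), and PartitionPushDownSpec.apply(...) ultimately invokes applyPartitions(...) on the copied source. A minimal sketch of such a source follows; the class name, field names, and partition values are assumptions, and the scan runtime is stubbed out:

import java.util.*;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.connector.source.ScanTableSource;
import org.apache.flink.table.connector.source.abilities.SupportsPartitionPushDown;

public class PartitionedSource implements ScanTableSource, SupportsPartitionPushDown {

    private List<Map<String, String>> remainingPartitions;

    @Override
    public Optional<List<Map<String, String>>> listPartitions() {
        // Returning Optional.empty() here makes the rule fall back to the catalog
        // (see readPartitionsAndPrune below).
        return Optional.of(List.of(Map.of("dt", "2024-01-01"), Map.of("dt", "2024-01-02")));
    }

    @Override
    public void applyPartitions(List<Map<String, String>> remainingPartitions) {
        // Called with the pruned partition set via PartitionPushDownSpec.apply(...).
        this.remainingPartitions = remainingPartitions;
    }

    @Override
    public ChangelogMode getChangelogMode() {
        return ChangelogMode.insertOnly();
    }

    @Override
    public ScanRuntimeProvider getScanRuntimeProvider(ScanContext runtimeProviderContext) {
        throw new UnsupportedOperationException("Scan runtime omitted in this sketch.");
    }

    @Override
    public DynamicTableSource copy() {
        // The rule copies the source before applying the push-down, so copies
        // must carry over any already-pushed state.
        PartitionedSource copy = new PartitionedSource();
        copy.remainingPartitions = this.remainingPartitions;
        return copy;
    }

    @Override
    public String asSummaryString() {
        return "PartitionedSource";
    }
}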
Use of org.apache.flink.table.api.TableException in project flink by apache.
The class PushPartitionIntoTableSourceScanRule, method readPartitionsAndPrune.
private List<Map<String, String>> readPartitionsAndPrune(
        RexBuilder rexBuilder,
        FlinkContext context,
        TableSourceTable tableSourceTable,
        Function<List<Map<String, String>>, List<Map<String, String>>> pruner,
        Seq<RexNode> partitionPredicate,
        List<String> inputFieldNames) {
    // get partitions from table/catalog and prune
    Optional<Catalog> catalogOptional = tableSourceTable.contextResolvedTable().getCatalog();
    DynamicTableSource dynamicTableSource = tableSourceTable.tableSource();
    Optional<List<Map<String, String>>> optionalPartitions =
            ((SupportsPartitionPushDown) dynamicTableSource).listPartitions();
    if (optionalPartitions.isPresent()) {
        return pruner.apply(optionalPartitions.get());
    } else {
        // we will read partitions from catalog if table doesn't support listPartitions.
        if (!catalogOptional.isPresent()) {
            throw new TableException(
                    String.format(
                            "Table '%s' connector doesn't provide partitions, and it cannot be loaded from the catalog",
                            tableSourceTable.contextResolvedTable().getIdentifier().asSummaryString()));
        }
        try {
            return readPartitionFromCatalogAndPrune(
                    rexBuilder,
                    context,
                    catalogOptional.get(),
                    tableSourceTable.contextResolvedTable().getIdentifier(),
                    inputFieldNames,
                    partitionPredicate,
                    pruner);
        } catch (TableNotExistException tableNotExistException) {
            throw new TableException(
                    String.format(
                            "Table %s is not found in catalog.",
                            tableSourceTable.contextResolvedTable().getIdentifier().asSummaryString()));
        } catch (TableNotPartitionedException tableNotPartitionedException) {
            throw new TableException(
                    String.format(
                            "Table %s is not a partitionable source. Validator should have checked it.",
                            tableSourceTable.contextResolvedTable().getIdentifier().asSummaryString()),
                    tableNotPartitionedException);
        }
    }
}
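When the source returns Optional.empty(), the catalog path takes over. Below is a simplified sketch of that list-then-prune shape using the public Catalog API; the method and variable names are assumptions, and the real readPartitionFromCatalogAndPrune can additionally push the partition predicate into catalogs that support filtered listing, which this sketch omits:

import java.util.*;
import java.util.function.Function;
import org.apache.flink.table.catalog.Catalog;
import org.apache.flink.table.catalog.CatalogPartitionSpec;
import org.apache.flink.table.catalog.ObjectPath;
import org.apache.flink.table.catalog.exceptions.TableNotExistException;
import org.apache.flink.table.catalog.exceptions.TableNotPartitionedException;

class CatalogPruneSketch {
    static List<Map<String, String>> listAndPrune(
            Catalog catalog,
            ObjectPath tablePath,
            Function<List<Map<String, String>>, List<Map<String, String>>> pruner)
            throws TableNotExistException, TableNotPartitionedException {
        // Ask the catalog for every partition spec of the table...
        List<Map<String, String>> partitions = new ArrayList<>();
        for (CatalogPartitionSpec spec : catalog.listPartitions(tablePath)) {
            partitions.add(spec.getPartitionSpec()); // e.g. {dt=2024-01-01}
        }
        // ...then keep only the partitions the predicate-based pruner accepts.
        return pruner.apply(partitions);
    }
}

Note that listPartitions declares exactly the two checked exceptions the caller above converts into TableException.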
Use of org.apache.flink.table.api.TableException in project flink by apache.
The class StreamPhysicalPythonGroupTableAggregateRule, method matches.
@Override
public boolean matches(RelOptRuleCall call) {
    FlinkLogicalTableAggregate agg = call.rel(0);
    List<AggregateCall> aggCalls = agg.getAggCallList();
    boolean existGeneralPythonFunction =
            aggCalls.stream().anyMatch(x -> PythonUtil.isPythonAggregate(x, PythonFunctionKind.GENERAL));
    boolean existPandasFunction =
            aggCalls.stream().anyMatch(x -> PythonUtil.isPythonAggregate(x, PythonFunctionKind.PANDAS));
    boolean existJavaUserDefinedFunction =
            aggCalls.stream()
                    .anyMatch(x -> !PythonUtil.isPythonAggregate(x, null) && !PythonUtil.isBuiltInAggregate(x));
    if (existPandasFunction || existGeneralPythonFunction) {
        if (existPandasFunction) {
            throw new TableException("Pandas UDAFs are not supported in streaming mode currently.");
        }
        if (existJavaUserDefinedFunction) {
            throw new TableException("Python UDAF and Java/Scala UDAF cannot be used together.");
        }
        return true;
    } else {
        return false;
    }
}
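The essence of the guard is a three-way classification of the aggregate calls: Pandas UDAFs are rejected outright in streaming mode, general Python UDAFs may not be mixed with Java/Scala UDAFs, and built-in aggregates are tolerated alongside Python ones. A standalone sketch of the same decision logic, with a made-up AggKind enum standing in for PythonUtil's checks (illustrative only, not Flink code):

import java.util.List;

public class MixedUdafCheck {

    // Made-up stand-in for PythonUtil.isPythonAggregate / isBuiltInAggregate.
    enum AggKind { GENERAL_PYTHON, PANDAS, JAVA_SCALA, BUILTIN }

    static boolean matches(List<AggKind> aggCalls) {
        boolean existPandas = aggCalls.contains(AggKind.PANDAS);
        boolean existGeneralPython = aggCalls.contains(AggKind.GENERAL_PYTHON);
        boolean existJavaUdf = aggCalls.contains(AggKind.JAVA_SCALA);
        if (existPandas || existGeneralPython) {
            if (existPandas) {
                throw new IllegalStateException(
                        "Pandas UDAFs are not supported in streaming mode currently.");
            }
            if (existJavaUdf) {
                throw new IllegalStateException(
                        "Python UDAF and Java/Scala UDAF cannot be used together.");
            }
            return true; // only general Python (plus built-ins): the rule fires
        }
        return false; // no Python aggregates: leave the plan to other rules
    }

    public static void main(String[] args) {
        System.out.println(matches(List.of(AggKind.GENERAL_PYTHON, AggKind.BUILTIN))); // true
        System.out.println(matches(List.of(AggKind.BUILTIN))); // false
    }
}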
Use of org.apache.flink.table.api.TableException in project flink by apache.
The class StreamPhysicalPythonOverAggregateRule, method convert.
@Override
public RelNode convert(RelNode rel) {
    FlinkLogicalOverAggregate logicWindow = (FlinkLogicalOverAggregate) rel;
    if (logicWindow.groups.size() > 1) {
        throw new TableException(
                "Over Agg: Unsupported use of OVER windows. "
                        + "All aggregates must be computed on the same window. "
                        + "please re-check the over window statement.");
    }
    ImmutableBitSet keys = logicWindow.groups.get(0).keys;
    FlinkRelDistribution requiredDistribution;
    if (!keys.isEmpty()) {
        requiredDistribution = FlinkRelDistribution.hash(keys.asList(), true);
    } else {
        requiredDistribution = FlinkRelDistribution.SINGLETON();
    }
    RelNode input = logicWindow.getInput();
    RelTraitSet requiredTraitSet =
            input.getTraitSet().replace(FlinkConventions.STREAM_PHYSICAL()).replace(requiredDistribution);
    RelTraitSet providedTraitSet = rel.getTraitSet().replace(FlinkConventions.STREAM_PHYSICAL());
    RelNode newInput = RelOptRule.convert(input, requiredTraitSet);
    return new StreamPhysicalPythonOverAggregate(
            rel.getCluster(), providedTraitSet, newInput, rel.getRowType(), logicWindow);
}
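For context, a hedged sketch of a query shape that trips the multi-group guard above: two OVER aggregates with different PARTITION BY clauses land in separate window groups, so logicWindow.groups.size() > 1. The table DDL and names are assumptions; the built-in SUM keeps the sketch self-contained, on the assumption that the corresponding non-Python over-aggregate rule carries the same guard and message, while a genuine match for this rule would use a Python UDAF:

import org.apache.flink.table.api.*;

public class MultiWindowOverExample {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        // Illustrative datagen-backed table with a processing-time attribute.
        tEnv.executeSql(
                "CREATE TABLE Orders (userId INT, productId INT, amount INT, ts AS PROCTIME()) "
                        + "WITH ('connector' = 'datagen')");
        try {
            // Two different PARTITION BY clauses -> more than one window group.
            tEnv.executeSql(
                    "SELECT SUM(amount) OVER (PARTITION BY userId ORDER BY ts), "
                            + "SUM(amount) OVER (PARTITION BY productId ORDER BY ts) "
                            + "FROM Orders");
        } catch (TableException e) {
            // Expected: "Over Agg: Unsupported use of OVER windows. ..."
            System.out.println(e.getMessage());
        }
    }
}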