Use of org.apache.flink.table.api.TableException in project flink by apache.
The class DeadlockBreakupProcessor, method process:
@Override
public ExecNodeGraph process(ExecNodeGraph execGraph, ProcessorContext context) {
    if (!execGraph.getRootNodes().stream().allMatch(r -> r instanceof BatchExecNode)) {
        throw new TableException("Only BatchExecNode DAG are supported now.");
    }
    InputPriorityConflictResolver resolver =
            new InputPriorityConflictResolver(
                    execGraph.getRootNodes(),
                    InputProperty.DamBehavior.END_INPUT,
                    StreamExchangeMode.BATCH,
                    context.getPlanner().getConfiguration());
    resolver.detectAndResolve();
    return execGraph;
}
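The guard at the top is a validate-then-throw pattern: fail fast with a TableException before touching the graph. A minimal, self-contained sketch of the same idiom, using hypothetical Node/BatchNode/StreamNode stand-ins rather than Flink's planner classes:

import java.util.List;

public class GuardSketch {
    interface Node {}
    static class BatchNode implements Node {}   // hypothetical stand-ins
    static class StreamNode implements Node {}

    static void process(List<Node> roots) {
        // Fail fast if any root is not the expected subtype.
        if (!roots.stream().allMatch(r -> r instanceof BatchNode)) {
            throw new IllegalStateException("Only BatchNode DAGs are supported.");
        }
        // ... safe to proceed under the batch-only assumption ...
    }

    public static void main(String[] args) {
        process(List.of(new BatchNode()));  // ok: all roots are batch nodes
        try {
            process(List.of(new BatchNode(), new StreamNode()));
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage());  // "Only BatchNode DAGs are supported."
        }
    }
}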
Use of org.apache.flink.table.api.TableException in project flink by apache.
The class BatchExecPythonGroupWindowAggregate, method createPythonOneInputTransformation:
private OneInputTransformation<RowData, RowData> createPythonOneInputTransformation(
        Transformation<RowData> inputTransform, RowType inputRowType, RowType outputRowType,
        int maxLimitSize, long windowSize, long slideSize,
        Configuration pythonConfig, ExecNodeConfig config) {
    int[] namePropertyTypeArray = Arrays.stream(namedWindowProperties).mapToInt(p -> {
        WindowProperty property = p.getProperty();
        if (property instanceof WindowStart) {
            return 0;
        }
        if (property instanceof WindowEnd) {
            return 1;
        }
        if (property instanceof RowtimeAttribute) {
            return 2;
        }
        throw new TableException("Unexpected property " + property);
    }).toArray();
    Tuple2<int[], PythonFunctionInfo[]> aggInfos =
            CommonPythonUtil.extractPythonAggregateFunctionInfosFromAggregateCall(aggCalls);
    int[] pythonUdafInputOffsets = aggInfos.f0;
    PythonFunctionInfo[] pythonFunctionInfos = aggInfos.f1;
    OneInputStreamOperator<RowData, RowData> pythonOperator =
            getPythonGroupWindowAggregateFunctionOperator(
                    config, pythonConfig, inputRowType, outputRowType, maxLimitSize,
                    windowSize, slideSize, namePropertyTypeArray,
                    pythonUdafInputOffsets, pythonFunctionInfos);
    return ExecNodeUtil.createOneInputTransformation(
            inputTransform, createTransformationName(config),
            createTransformationDescription(config), pythonOperator,
            InternalTypeInfo.of(outputRowType), inputTransform.getParallelism());
}
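The mapToInt lambda flattens a closed set of window-property classes into integer tags (0 = window start, 1 = window end, 2 = rowtime) so they can be shipped to the Python operator, and throws TableException for anything outside the expected set. A minimal sketch of the same exhaustive-dispatch idiom, with hypothetical property classes in place of Flink's:

import java.util.Arrays;

public class PropertyTagSketch {
    interface WindowProperty {}                   // hypothetical stand-ins
    static class Start implements WindowProperty {}
    static class End implements WindowProperty {}
    static class Rowtime implements WindowProperty {}

    static int tagOf(WindowProperty p) {
        if (p instanceof Start) return 0;
        if (p instanceof End) return 1;
        if (p instanceof Rowtime) return 2;
        // Unreachable for the known set; guards against future subclasses.
        throw new IllegalArgumentException("Unexpected property " + p);
    }

    public static void main(String[] args) {
        WindowProperty[] props = {new Start(), new End(), new Rowtime()};
        System.out.println(Arrays.toString(
                Arrays.stream(props).mapToInt(PropertyTagSketch::tagOf).toArray()));  // [0, 1, 2]
    }
}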
Use of org.apache.flink.table.api.TableException in project flink by apache.
The class BatchExecSortLimit, method translateToPlanInternal:
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    if (limitEnd == Long.MAX_VALUE) {
        throw new TableException("Not support limitEnd is max value now!");
    }
    ExecEdge inputEdge = getInputEdges().get(0);
    Transformation<RowData> inputTransform =
            (Transformation<RowData>) inputEdge.translateToPlan(planner);
    RowType inputType = (RowType) inputEdge.getOutputType();
    // generate comparator
    GeneratedRecordComparator genComparator =
            ComparatorCodeGenerator.gen(config.getTableConfig(), "SortLimitComparator", inputType, sortSpec);
    // TODO If input is ordered, there is no need to use the heap.
    SortLimitOperator operator = new SortLimitOperator(isGlobal, limitStart, limitEnd, genComparator);
    return ExecNodeUtil.createOneInputTransformation(
            inputTransform, createTransformationName(config),
            createTransformationDescription(config), SimpleOperatorFactory.of(operator),
            InternalTypeInfo.of(inputType), inputTransform.getParallelism());
}
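The TODO hints at why a heap appears here: a sort-limit only needs the first limitEnd rows in sort order, so a bounded max-heap beats sorting the whole input. A minimal, self-contained sketch of that idea in plain Java (hypothetical helper, not Flink's SortLimitOperator):

import java.util.*;

public class SortLimitSketch {
    // Keep only rows in sort positions [limitStart, limitEnd) without sorting everything.
    static <T> List<T> sortLimit(Iterable<T> rows, Comparator<T> cmp, int limitStart, int limitEnd) {
        // Max-heap of the smallest `limitEnd` elements seen so far.
        PriorityQueue<T> heap = new PriorityQueue<>(cmp.reversed());
        for (T row : rows) {
            heap.offer(row);
            if (heap.size() > limitEnd) {
                heap.poll();  // evict the current largest; it can never be in the top limitEnd
            }
        }
        List<T> top = new ArrayList<>(heap);
        top.sort(cmp);
        return top.subList(Math.min(limitStart, top.size()), top.size());
    }

    public static void main(String[] args) {
        List<Integer> out = sortLimit(List.of(5, 1, 4, 2, 3, 0), Comparator.naturalOrder(), 1, 4);
        System.out.println(out);  // [1, 2, 3]
    }
}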
Use of org.apache.flink.table.api.TableException in project flink by apache.
The class SetOperationParseStrategy, method convert:
@Override
public Operation convert(String statement) {
    Matcher matcher = pattern.matcher(statement.trim());
    final List<String> operands = new ArrayList<>();
    if (matcher.find()) {
        if (matcher.group("key") != null) {
            operands.add(matcher.group("key"));
            operands.add(
                    matcher.group("quotedVal") != null
                            ? matcher.group("quotedVal")
                            : matcher.group("val"));
        }
    }
    // only capture SET
    if (operands.isEmpty()) {
        return new SetOperation();
    } else if (operands.size() == 2) {
        return new SetOperation(operands.get(0), operands.get(1));
    } else {
        // impossible
        throw new TableException(
                String.format("Failed to convert the statement to SET operation: %s.", statement));
    }
}
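The convert method relies on a regex with the named groups key, quotedVal, and val, defined elsewhere in the strategy. A runnable sketch with an illustrative pattern of the same shape — the group names match the snippet above, but the regex itself is an assumption, not Flink's actual pattern:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class SetParseSketch {
    // Illustrative pattern: bare `SET`, or `SET key = value` with an optionally quoted value.
    static final Pattern PATTERN = Pattern.compile(
            "SET(\\s+(?<key>[^'\\s]+)\\s*=\\s*(?:'(?<quotedVal>[^']*)'|(?<val>\\S+)))?",
            Pattern.CASE_INSENSITIVE);

    public static void main(String[] args) {
        for (String stmt : new String[] {"SET", "SET table.local-time-zone = 'UTC'"}) {
            Matcher m = PATTERN.matcher(stmt.trim());
            if (m.matches() && m.group("key") != null) {
                String value = m.group("quotedVal") != null ? m.group("quotedVal") : m.group("val");
                System.out.println(m.group("key") + " -> " + value);  // table.local-time-zone -> UTC
            } else {
                System.out.println("bare SET (list all properties)");
            }
        }
    }
}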
Use of org.apache.flink.table.api.TableException in project flink by apache.
The class WritingMetadataSpec, method apply:
@Override
public void apply(DynamicTableSink tableSink) {
    if (tableSink instanceof SupportsWritingMetadata) {
        DataType consumedDataType = TypeConversions.fromLogicalToDataType(consumedType);
        ((SupportsWritingMetadata) tableSink).applyWritableMetadata(metadataKeys, consumedDataType);
    } else {
        throw new TableException(
                String.format("%s does not support SupportsWritingMetadata.", tableSink.getClass().getName()));
    }
}
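This is the planner's ability-interface pattern: a sink advertises optional capabilities by implementing marker interfaces, and the spec downcasts after an instanceof check, failing with a descriptive exception otherwise. A minimal sketch of the pattern with hypothetical Sink/SupportsMetadata stand-ins:

import java.util.List;

public class AbilitySketch {
    interface Sink {}                         // hypothetical stand-ins
    interface SupportsMetadata {              // optional capability
        void applyMetadata(List<String> keys);
    }
    static class MetadataSink implements Sink, SupportsMetadata {
        public void applyMetadata(List<String> keys) {
            System.out.println("writing metadata: " + keys);
        }
    }
    static class PlainSink implements Sink {}

    static void apply(Sink sink, List<String> keys) {
        if (sink instanceof SupportsMetadata) {
            ((SupportsMetadata) sink).applyMetadata(keys);
        } else {
            throw new IllegalStateException(
                    sink.getClass().getName() + " does not support SupportsMetadata.");
        }
    }

    public static void main(String[] args) {
        apply(new MetadataSink(), List.of("timestamp"));  // ok: capability present
        try {
            apply(new PlainSink(), List.of("timestamp"));
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage());
        }
    }
}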