Use of org.apache.flink.api.dag.Transformation in project flink by apache.
The class BatchExecExchange, method translateToPlanInternal:
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(
        PlannerBase planner, ExecNodeConfig config) {
    final ExecEdge inputEdge = getInputEdges().get(0);
    final Transformation<RowData> inputTransform =
            (Transformation<RowData>) inputEdge.translateToPlan(planner);
    final RowType inputType = (RowType) inputEdge.getOutputType();
    boolean requireUndefinedExchangeMode = false;
    final StreamPartitioner<RowData> partitioner;
    final int parallelism;
    final InputProperty inputProperty = getInputProperties().get(0);
    final RequiredDistribution requiredDistribution = inputProperty.getRequiredDistribution();
    final InputProperty.DistributionType distributionType = requiredDistribution.getType();
    switch (distributionType) {
        case ANY:
            partitioner = null;
            parallelism = ExecutionConfig.PARALLELISM_DEFAULT;
            break;
        case BROADCAST:
            partitioner = new BroadcastPartitioner<>();
            parallelism = ExecutionConfig.PARALLELISM_DEFAULT;
            break;
        case SINGLETON:
            partitioner = new GlobalPartitioner<>();
            parallelism = 1;
            break;
        case HASH:
            partitioner = createHashPartitioner(
                    (HashDistribution) requiredDistribution, inputType, config);
            parallelism = ExecutionConfig.PARALLELISM_DEFAULT;
            break;
        case KEEP_INPUT_AS_IS:
            KeepInputAsIsDistribution keepInputAsIsDistribution =
                    (KeepInputAsIsDistribution) requiredDistribution;
            if (keepInputAsIsDistribution.isStrict()) {
                // Explicitly use ForwardPartitioner to guarantee that the data
                // distribution is exactly the same as the input.
                partitioner = new ForwardPartitioner<>();
                requireUndefinedExchangeMode = true;
            } else {
                RequiredDistribution inputDistribution =
                        keepInputAsIsDistribution.getInputDistribution();
                checkArgument(
                        inputDistribution instanceof HashDistribution,
                        "Only HashDistribution is supported now");
                partitioner = new ForwardForConsecutiveHashPartitioner<>(
                        createHashPartitioner(
                                (HashDistribution) inputDistribution, inputType, config));
            }
            parallelism = inputTransform.getParallelism();
            break;
        default:
            throw new TableException(distributionType + " is not supported now!");
    }
    final StreamExchangeMode exchangeMode =
            requireUndefinedExchangeMode
                    ? StreamExchangeMode.UNDEFINED
                    : getBatchStreamExchangeMode(config, requiredExchangeMode);
    final Transformation<RowData> transformation =
            new PartitionTransformation<>(inputTransform, partitioner, exchangeMode);
    transformation.setParallelism(parallelism);
    transformation.setOutputType(InternalTypeInfo.of(getOutputType()));
    return transformation;
}
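The method above reduces to one pattern: choose a StreamPartitioner for the required distribution, wrap the input in a PartitionTransformation with the chosen exchange mode, and pin the parallelism. A minimal, self-contained sketch of that final step (the class and helper names are made up for illustration; the real logic is inlined in the switch above):

import org.apache.flink.api.dag.Transformation;
import org.apache.flink.streaming.api.transformations.PartitionTransformation;
import org.apache.flink.streaming.api.transformations.StreamExchangeMode;
import org.apache.flink.streaming.runtime.partitioner.StreamPartitioner;
import org.apache.flink.table.data.RowData;

final class ExchangeSketch {
    // Hypothetical helper mirroring the tail of translateToPlanInternal:
    // wrap the input in a PartitionTransformation and fix its parallelism.
    static Transformation<RowData> exchange(
            Transformation<RowData> input,
            StreamPartitioner<RowData> partitioner, // null means ANY: leave the choice open
            StreamExchangeMode mode,
            int parallelism) {
        PartitionTransformation<RowData> exchange =
                new PartitionTransformation<>(input, partitioner, mode);
        exchange.setParallelism(parallelism);
        return exchange;
    }
}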
Use of org.apache.flink.api.dag.Transformation in project flink by apache.
The class BatchExecHashAggregate, method translateToPlanInternal:
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(
        PlannerBase planner, ExecNodeConfig config) {
    final ExecEdge inputEdge = getInputEdges().get(0);
    final Transformation<RowData> inputTransform =
            (Transformation<RowData>) inputEdge.translateToPlan(planner);
    final RowType inputRowType = (RowType) inputEdge.getOutputType();
    final RowType outputRowType = (RowType) getOutputType();
    final CodeGeneratorContext ctx = new CodeGeneratorContext(config.getTableConfig());
    final AggregateInfoList aggInfos =
            AggregateUtil.transformToBatchAggregateInfoList(
                    aggInputRowType,
                    JavaScalaConversionUtil.toScala(Arrays.asList(aggCalls)),
                    null, // aggCallNeedRetractions
                    null); // orderKeyIndexes
    final long managedMemory;
    final GeneratedOperator<OneInputStreamOperator<RowData, RowData>> generatedOperator;
    if (grouping.length == 0) {
        // No grouping keys: no hash table is needed, hence no managed memory.
        managedMemory = 0L;
        generatedOperator = AggWithoutKeysCodeGenerator.genWithoutKeys(
                ctx, planner.getRelBuilder(), aggInfos, inputRowType, outputRowType,
                isMerge, isFinal, "NoGrouping");
    } else {
        managedMemory =
                config.get(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_HASH_AGG_MEMORY)
                        .getBytes();
        generatedOperator = new HashAggCodeGenerator(
                ctx, planner.getRelBuilder(), aggInfos, inputRowType, outputRowType,
                grouping, auxGrouping, isMerge, isFinal)
                .genWithKeys();
    }
    return ExecNodeUtil.createOneInputTransformation(
            inputTransform,
            createTransformationName(config),
            createTransformationDescription(config),
            new CodeGenOperatorFactory<>(generatedOperator),
            InternalTypeInfo.of(outputRowType),
            inputTransform.getParallelism(),
            managedMemory);
}
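The keyed branch sizes its hash table from the table.exec.resource.hash-agg.memory option. A short sketch of overriding that budget before planning, assuming Flink 1.15+ where TableConfig accepts ConfigOptions directly (the class name is made up):

import org.apache.flink.configuration.MemorySize;
import org.apache.flink.table.api.TableConfig;
import org.apache.flink.table.api.config.ExecutionConfigOptions;

final class HashAggMemorySketch {
    public static void main(String[] args) {
        TableConfig tableConfig = TableConfig.getDefault();
        // Raise the managed-memory budget that the keyed branch above reads via
        // ExecutionConfigOptions.TABLE_EXEC_RESOURCE_HASH_AGG_MEMORY.
        tableConfig.set(
                ExecutionConfigOptions.TABLE_EXEC_RESOURCE_HASH_AGG_MEMORY,
                MemorySize.parse("256 mb"));
    }
}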
Use of org.apache.flink.api.dag.Transformation in project flink by apache.
The class CommonExecWindowTableFunction, method translateToPlanInternal:
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(
        PlannerBase planner, ExecNodeConfig config) {
    final ExecEdge inputEdge = getInputEdges().get(0);
    final Transformation<RowData> inputTransform =
            (Transformation<RowData>) inputEdge.translateToPlan(planner);
    WindowAssigner<TimeWindow> windowAssigner = createWindowAssigner(windowingStrategy);
    final ZoneId shiftTimeZone =
            TimeWindowUtil.getShiftTimeZone(
                    windowingStrategy.getTimeAttributeType(), config.getLocalTimeZone());
    WindowTableFunctionOperator windowTableFunctionOperator =
            new WindowTableFunctionOperator(
                    windowAssigner, windowingStrategy.getTimeAttributeIndex(), shiftTimeZone);
    return ExecNodeUtil.createOneInputTransformation(
            inputTransform,
            createTransformationMeta(WINDOW_TRANSFORMATION, config),
            windowTableFunctionOperator,
            InternalTypeInfo.of(getOutputType()),
            inputTransform.getParallelism());
}
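This translation backs the SQL window table-valued functions. A minimal sketch that exercises it end to end, with a datagen-backed table invented for illustration; reading the TVF output directly (without a follow-up window aggregation) keeps the WindowTableFunctionOperator in the plan:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

final class WindowTvfSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        tEnv.executeSql(
                "CREATE TABLE orders ("
                        + " amount BIGINT,"
                        + " order_time TIMESTAMP(3),"
                        + " WATERMARK FOR order_time AS order_time - INTERVAL '5' SECOND"
                        + ") WITH ('connector' = 'datagen')");
        // The TUMBLE table function is planned through
        // CommonExecWindowTableFunction#translateToPlanInternal.
        tEnv.executeSql(
                "SELECT * FROM TABLE("
                        + "TUMBLE(TABLE orders, DESCRIPTOR(order_time), INTERVAL '10' MINUTES))")
                .print();
    }
}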
Use of org.apache.flink.api.dag.Transformation in project flink by apache.
The class BatchExecSink, method translateToPlanInternal:
@SuppressWarnings("unchecked")
@Override
protected Transformation<Object> translateToPlanInternal(
        PlannerBase planner, ExecNodeConfig config) {
    final Transformation<RowData> inputTransform =
            (Transformation<RowData>) getInputEdges().get(0).translateToPlan(planner);
    final DynamicTableSink tableSink = tableSinkSpec.getTableSink(planner.getFlinkContext());
    // -1: no rowtime field index; false: no upsert materialization (batch mode).
    return createSinkTransformation(
            planner.getExecEnv(), config, inputTransform, tableSink, -1, false);
}
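Any INSERT executed on a batch TableEnvironment is planned through this method. A minimal sketch, with table names and the datagen/blackhole connectors chosen only for illustration:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

final class BatchSinkSketch {
    public static void main(String[] args) throws Exception {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
        tEnv.executeSql(
                "CREATE TABLE src (v BIGINT) WITH ("
                        + "'connector' = 'datagen', 'number-of-rows' = '10')");
        tEnv.executeSql("CREATE TABLE snk (v BIGINT) WITH ('connector' = 'blackhole')");
        // The INSERT below is translated through BatchExecSink#translateToPlanInternal.
        tEnv.executeSql("INSERT INTO snk SELECT v FROM src").await();
    }
}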
Use of org.apache.flink.api.dag.Transformation in project flink by apache.
The class BatchExecSortAggregate, method translateToPlanInternal:
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(
        PlannerBase planner, ExecNodeConfig config) {
    final ExecEdge inputEdge = getInputEdges().get(0);
    final Transformation<RowData> inputTransform =
            (Transformation<RowData>) inputEdge.translateToPlan(planner);
    final RowType inputRowType = (RowType) inputEdge.getOutputType();
    final RowType outputRowType = (RowType) getOutputType();
    final CodeGeneratorContext ctx = new CodeGeneratorContext(config.getTableConfig());
    final AggregateInfoList aggInfos =
            AggregateUtil.transformToBatchAggregateInfoList(
                    aggInputRowType,
                    JavaScalaConversionUtil.toScala(Arrays.asList(aggCalls)),
                    null, // aggCallNeedRetractions
                    null); // orderKeyIndexes
    final GeneratedOperator<OneInputStreamOperator<RowData, RowData>> generatedOperator;
    if (grouping.length == 0) {
        generatedOperator = AggWithoutKeysCodeGenerator.genWithoutKeys(
                ctx, planner.getRelBuilder(), aggInfos, inputRowType, outputRowType,
                isMerge, isFinal, "NoGrouping");
    } else {
        generatedOperator = SortAggCodeGenerator.genWithKeys(
                ctx, planner.getRelBuilder(), aggInfos, inputRowType, outputRowType,
                grouping, auxGrouping, isMerge, isFinal);
    }
    return ExecNodeUtil.createOneInputTransformation(
            inputTransform,
            createTransformationName(config),
            createTransformationDescription(config),
            new CodeGenOperatorFactory<>(generatedOperator),
            InternalTypeInfo.of(outputRowType),
            inputTransform.getParallelism());
}
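Whether the planner emits this node or BatchExecHashAggregate is an optimizer choice; one documented way to force the sort-based variant is disabling the hash aggregate through table.exec.disabled-operators. A minimal sketch (class name made up; "HashAgg" is the operator token listed in the option's documentation):

import org.apache.flink.table.api.TableConfig;
import org.apache.flink.table.api.config.ExecutionConfigOptions;

final class ForceSortAggSketch {
    public static void main(String[] args) {
        TableConfig tableConfig = TableConfig.getDefault();
        // Disabling "HashAgg" steers the batch planner toward BatchExecSortAggregate.
        tableConfig.set(ExecutionConfigOptions.TABLE_EXEC_DISABLED_OPERATORS, "HashAgg");
    }
}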