Use of org.apache.flink.table.planner.codegen.CodeGeneratorContext in project flink by apache.
From the class BatchExecHashAggregate, method translateToPlanInternal:
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    final ExecEdge inputEdge = getInputEdges().get(0);
    final Transformation<RowData> inputTransform =
            (Transformation<RowData>) inputEdge.translateToPlan(planner);
    final RowType inputRowType = (RowType) inputEdge.getOutputType();
    final RowType outputRowType = (RowType) getOutputType();
    final CodeGeneratorContext ctx = new CodeGeneratorContext(config.getTableConfig());
    final AggregateInfoList aggInfos =
            AggregateUtil.transformToBatchAggregateInfoList(
                    aggInputRowType,
                    JavaScalaConversionUtil.toScala(Arrays.asList(aggCalls)),
                    null, // aggCallNeedRetractions
                    null); // orderKeyIndexes
    final long managedMemory;
    final GeneratedOperator<OneInputStreamOperator<RowData, RowData>> generatedOperator;
    if (grouping.length == 0) {
        // Without grouping keys there is no hash table, so no managed memory is reserved.
        managedMemory = 0L;
        generatedOperator =
                AggWithoutKeysCodeGenerator.genWithoutKeys(
                        ctx, planner.getRelBuilder(), aggInfos, inputRowType, outputRowType,
                        isMerge, isFinal, "NoGrouping");
    } else {
        managedMemory =
                config.get(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_HASH_AGG_MEMORY).getBytes();
        generatedOperator =
                new HashAggCodeGenerator(
                                ctx, planner.getRelBuilder(), aggInfos, inputRowType, outputRowType,
                                grouping, auxGrouping, isMerge, isFinal)
                        .genWithKeys();
    }
    return ExecNodeUtil.createOneInputTransformation(
            inputTransform, createTransformationName(config), createTransformationDescription(config),
            new CodeGenOperatorFactory<>(generatedOperator), InternalTypeInfo.of(outputRowType),
            inputTransform.getParallelism(), managedMemory);
}
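The managed memory reserved in the keyed branch above is sized by ExecutionConfigOptions.TABLE_EXEC_RESOURCE_HASH_AGG_MEMORY. Below is a minimal, hedged sketch of tuning that option before running a grouped batch query; the table T, its columns, and the 256 mb figure are illustrative assumptions rather than anything taken from the snippet.

import org.apache.flink.configuration.MemorySize;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.config.ExecutionConfigOptions;

public class HashAggMemoryExample {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
        // Raise the managed memory weight that the keyed hash-aggregate branch reserves.
        tEnv.getConfig().getConfiguration().set(
                ExecutionConfigOptions.TABLE_EXEC_RESOURCE_HASH_AGG_MEMORY, MemorySize.parse("256 mb"));
        // A grouped aggregation over a batch table takes the grouping.length > 0 branch above;
        // the table T is assumed to have been registered elsewhere.
        tEnv.executeSql("SELECT a, SUM(b) FROM T GROUP BY a");
    }
}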
Use of org.apache.flink.table.planner.codegen.CodeGeneratorContext in project flink by apache.
From the class BatchExecSortAggregate, method translateToPlanInternal:
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    final ExecEdge inputEdge = getInputEdges().get(0);
    final Transformation<RowData> inputTransform =
            (Transformation<RowData>) inputEdge.translateToPlan(planner);
    final RowType inputRowType = (RowType) inputEdge.getOutputType();
    final RowType outputRowType = (RowType) getOutputType();
    final CodeGeneratorContext ctx = new CodeGeneratorContext(config.getTableConfig());
    final AggregateInfoList aggInfos =
            AggregateUtil.transformToBatchAggregateInfoList(
                    aggInputRowType,
                    JavaScalaConversionUtil.toScala(Arrays.asList(aggCalls)),
                    null, null);
    final GeneratedOperator<OneInputStreamOperator<RowData, RowData>> generatedOperator;
    if (grouping.length == 0) {
        generatedOperator =
                AggWithoutKeysCodeGenerator.genWithoutKeys(
                        ctx, planner.getRelBuilder(), aggInfos, inputRowType, outputRowType,
                        isMerge, isFinal, "NoGrouping");
    } else {
        generatedOperator =
                SortAggCodeGenerator.genWithKeys(
                        ctx, planner.getRelBuilder(), aggInfos, inputRowType, outputRowType,
                        grouping, auxGrouping, isMerge, isFinal);
    }
    return ExecNodeUtil.createOneInputTransformation(
            inputTransform, createTransformationName(config), createTransformationDescription(config),
            new CodeGenOperatorFactory<>(generatedOperator), InternalTypeInfo.of(outputRowType),
            inputTransform.getParallelism());
}
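Note that this sort-based path passes no managed-memory argument to createOneInputTransformation, unlike the hash aggregate above. A hedged sketch of steering the batch planner toward the sort-aggregate path by disabling the hash aggregate follows; the option value "HashAgg" and the table T are assumptions based on ExecutionConfigOptions.TABLE_EXEC_DISABLED_OPERATORS, not something shown in the snippet.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.config.ExecutionConfigOptions;

public class SortAggExample {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
        // Disabling the hash-based operator nudges the optimizer toward SortAggregate,
        // i.e. the SortAggCodeGenerator.genWithKeys branch above (value is an assumption).
        tEnv.getConfig().getConfiguration().set(
                ExecutionConfigOptions.TABLE_EXEC_DISABLED_OPERATORS, "HashAgg");
        // The table T is assumed to have been registered elsewhere.
        tEnv.executeSql("SELECT a, COUNT(*) FROM T GROUP BY a");
    }
}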
Use of org.apache.flink.table.planner.codegen.CodeGeneratorContext in project flink by apache.
From the class BatchExecSortWindowAggregate, method translateToPlanInternal:
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    final ExecEdge inputEdge = getInputEdges().get(0);
    final Transformation<RowData> inputTransform =
            (Transformation<RowData>) inputEdge.translateToPlan(planner);
    final AggregateInfoList aggInfos =
            AggregateUtil.transformToBatchAggregateInfoList(
                    aggInputRowType,
                    JavaScalaConversionUtil.toScala(Arrays.asList(aggCalls)),
                    null, // aggCallNeedRetractions
                    null); // orderKeyIndexes
    final int groupBufferLimitSize =
            config.get(ExecutionConfigOptions.TABLE_EXEC_WINDOW_AGG_BUFFER_SIZE_LIMIT);
    final Tuple2<Long, Long> windowSizeAndSlideSize = WindowCodeGenerator.getWindowDef(window);
    final SortWindowCodeGenerator windowCodeGenerator =
            new SortWindowCodeGenerator(
                    new CodeGeneratorContext(config.getTableConfig()),
                    planner.getRelBuilder(),
                    window, inputTimeFieldIndex, inputTimeIsDate,
                    JavaScalaConversionUtil.toScala(Arrays.asList(namedWindowProperties)),
                    aggInfos,
                    (RowType) inputEdge.getOutputType(),
                    (RowType) getOutputType(),
                    groupBufferLimitSize,
                    0L, // windowStart
                    windowSizeAndSlideSize.f0, windowSizeAndSlideSize.f1,
                    grouping, auxGrouping, enableAssignPane, isMerge, isFinal);
    final GeneratedOperator<OneInputStreamOperator<RowData, RowData>> generatedOperator;
    if (grouping.length == 0) {
        generatedOperator = windowCodeGenerator.genWithoutKeys();
    } else {
        generatedOperator = windowCodeGenerator.genWithKeys();
    }
    return ExecNodeUtil.createOneInputTransformation(
            inputTransform, createTransformationName(config), createTransformationDescription(config),
            new CodeGenOperatorFactory<>(generatedOperator), InternalTypeInfo.of(getOutputType()),
            inputTransform.getParallelism());
}
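The groupBufferLimitSize read above is an Integer option, and, going by the variable name, the Tuple2 from WindowCodeGenerator.getWindowDef carries the window size in f0 and the slide in f1. A small, hedged sketch of setting the buffer limit on a plain Configuration; the value 200_000 is an illustrative assumption, not a recommendation.

import org.apache.flink.configuration.Configuration;
import org.apache.flink.table.api.config.ExecutionConfigOptions;

public class WindowAggBufferLimitExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // groupBufferLimitSize in the snippet above comes from this Integer option.
        conf.set(ExecutionConfigOptions.TABLE_EXEC_WINDOW_AGG_BUFFER_SIZE_LIMIT, 200_000);
        System.out.println(conf.get(ExecutionConfigOptions.TABLE_EXEC_WINDOW_AGG_BUFFER_SIZE_LIMIT));
    }
}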
Use of org.apache.flink.table.planner.codegen.CodeGeneratorContext in project flink by apache.
From the class CommonExecCalc, method translateToPlanInternal:
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    final ExecEdge inputEdge = getInputEdges().get(0);
    final Transformation<RowData> inputTransform =
            (Transformation<RowData>) inputEdge.translateToPlan(planner);
    final CodeGeneratorContext ctx =
            new CodeGeneratorContext(config.getTableConfig()).setOperatorBaseClass(operatorBaseClass);
    final CodeGenOperatorFactory<RowData> substituteStreamOperator =
            CalcCodeGenerator.generateCalcOperator(
                    ctx, inputTransform, (RowType) getOutputType(), JavaScalaConversionUtil.toScala(projection),
                    JavaScalaConversionUtil.toScala(Optional.ofNullable(this.condition)),
                    retainHeader, getClass().getSimpleName());
    return ExecNodeUtil.createOneInputTransformation(
            inputTransform, createTransformationMeta(CALC_TRANSFORMATION, config), substituteStreamOperator,
            InternalTypeInfo.of(getOutputType()), inputTransform.getParallelism());
}
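For context, a Calc node is a fused projection plus optional filter, which is why generateCalcOperator receives both a projection list and an optional condition. A hedged sketch of the kind of query that ends up in this translation; the table Orders and its columns are assumed example names, not taken from the snippet.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class CalcExample {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
        // A projection combined with a filter is folded into a single Calc node by the planner,
        // so a query of this shape exercises the generateCalcOperator call above.
        // The table Orders is assumed to have been registered elsewhere.
        tEnv.executeSql("SELECT UPPER(name), amount * 2 FROM Orders WHERE amount > 10").print();
    }
}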
Use of org.apache.flink.table.planner.codegen.CodeGeneratorContext in project flink by apache.
From the class BatchExecNestedLoopJoin, method translateToPlanInternal:
@Override
@SuppressWarnings("unchecked")
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    ExecEdge leftInputEdge = getInputEdges().get(0);
    ExecEdge rightInputEdge = getInputEdges().get(1);
    Transformation<RowData> leftInputTransform =
            (Transformation<RowData>) leftInputEdge.translateToPlan(planner);
    Transformation<RowData> rightInputTransform =
            (Transformation<RowData>) rightInputEdge.translateToPlan(planner);
    // get input types
    RowType leftType = (RowType) leftInputEdge.getOutputType();
    RowType rightType = (RowType) rightInputEdge.getOutputType();
    CodeGenOperatorFactory<RowData> operator =
            new NestedLoopJoinCodeGenerator(
                            new CodeGeneratorContext(config.getTableConfig()),
                            singleRowJoin, leftIsBuild, leftType, rightType,
                            (RowType) getOutputType(), joinType, condition)
                    .gen();
    // Run with the parallelism of the probe side, i.e. the input that is not built.
    int parallelism = leftInputTransform.getParallelism();
    if (leftIsBuild) {
        parallelism = rightInputTransform.getParallelism();
    }
    // Managed memory for the external buffer is only needed when this is not a single-row join.
    long manageMem = 0;
    if (!singleRowJoin) {
        manageMem = config.get(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_EXTERNAL_BUFFER_MEMORY).getBytes();
    }
    return ExecNodeUtil.createTwoInputTransformation(
            leftInputTransform, rightInputTransform,
            createTransformationName(config), createTransformationDescription(config),
            operator, InternalTypeInfo.of(getOutputType()), parallelism, manageMem);
}
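The external-buffer memory reserved for non-single-row joins above is sized by ExecutionConfigOptions.TABLE_EXEC_RESOURCE_EXTERNAL_BUFFER_MEMORY. A hedged sketch of tuning it and issuing a non-equi join, which the batch planner typically executes as a nested-loop join; the tables L and R and the 64 mb figure are illustrative assumptions.

import org.apache.flink.configuration.MemorySize;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.config.ExecutionConfigOptions;

public class NestedLoopJoinBufferExample {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
        // Size the managed memory used by the join's external buffer (non-single-row case).
        tEnv.getConfig().getConfiguration().set(
                ExecutionConfigOptions.TABLE_EXEC_RESOURCE_EXTERNAL_BUFFER_MEMORY,
                MemorySize.parse("64 mb"));
        // A join with no equi-condition has no hash or sort-merge alternative, so it is
        // typically planned as a nested-loop join; L and R are assumed to be registered elsewhere.
        tEnv.executeSql("SELECT * FROM L, R WHERE L.a > R.b");
    }
}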