Use of org.apache.flink.table.types.logical.RowType in project flink by apache.
Class BatchExecPythonGroupAggregate, method translateToPlanInternal:
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(
        PlannerBase planner, ExecNodeConfig config) {
    final ExecEdge inputEdge = getInputEdges().get(0);
    final Transformation<RowData> inputTransform =
            (Transformation<RowData>) inputEdge.translateToPlan(planner);
    final RowType inputRowType = (RowType) inputEdge.getOutputType();
    final RowType outputRowType = InternalTypeInfo.of(getOutputType()).toRowType();
    Configuration pythonConfig =
            CommonPythonUtil.getMergedConfig(planner.getExecEnv(), config.getTableConfig());
    OneInputTransformation<RowData, RowData> transform =
            createPythonOneInputTransformation(
                    inputTransform, inputRowType, outputRowType, pythonConfig, config);
    if (CommonPythonUtil.isPythonWorkerUsingManagedMemory(pythonConfig)) {
        transform.declareManagedMemoryUseCaseAtSlotScope(ManagedMemoryUseCase.PYTHON);
    }
    return transform;
}
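As a small aside on the type handling above, here is a minimal standalone sketch of how a RowType can be built explicitly and round-tripped through InternalTypeInfo, as translateToPlanInternal does for the output type; the field names and types are illustrative only, not taken from the snippet.

// Hypothetical two-field schema, used only for illustration.
RowType rowType =
        RowType.of(
                new LogicalType[] {new IntType(), new VarCharType(VarCharType.MAX_LENGTH)},
                new String[] {"id", "name"});
// Wrap the logical type into the TypeInformation used by the runtime...
InternalTypeInfo<RowData> typeInfo = InternalTypeInfo.of(rowType);
// ...and recover the RowType again.
RowType sameRowType = typeInfo.toRowType();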
Use of org.apache.flink.table.types.logical.RowType in project flink by apache.
Class BatchExecPythonOverAggregate, method translateToPlanInternal:
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(
        PlannerBase planner, ExecNodeConfig config) {
    final ExecEdge inputEdge = getInputEdges().get(0);
    final Transformation<RowData> inputTransform =
            (Transformation<RowData>) inputEdge.translateToPlan(planner);
    final RowType inputType = (RowType) inputEdge.getOutputType();
    List<OverSpec.GroupSpec> groups = overSpec.getGroups();
    boolean[] isRangeWindows = new boolean[groups.size()];
    for (int i = 0; i < groups.size(); i++) {
        OverSpec.GroupSpec group = groups.get(i);
        List<AggregateCall> groupAggCalls = group.getAggCalls();
        aggCalls.addAll(groupAggCalls);
        for (int j = 0; j < groupAggCalls.size(); j++) {
            aggWindowIndex.add(i);
        }
        OverWindowMode mode = inferGroupMode(group);
        if (mode == OverWindowMode.ROW) {
            isRangeWindows[i] = false;
            if (isUnboundedWindow(group)) {
                lowerBoundary.add(Long.MIN_VALUE);
                upperBoundary.add(Long.MAX_VALUE);
            } else if (isUnboundedPrecedingWindow(group)) {
                lowerBoundary.add(Long.MIN_VALUE);
                upperBoundary.add(OverAggregateUtil.getLongBoundary(overSpec, group.getUpperBound()));
            } else if (isUnboundedFollowingWindow(group)) {
                lowerBoundary.add(OverAggregateUtil.getLongBoundary(overSpec, group.getLowerBound()));
                upperBoundary.add(Long.MAX_VALUE);
            } else if (isSlidingWindow(group)) {
                lowerBoundary.add(OverAggregateUtil.getLongBoundary(overSpec, group.getLowerBound()));
                upperBoundary.add(OverAggregateUtil.getLongBoundary(overSpec, group.getUpperBound()));
            } else {
                throw new TableException("Unsupported row window group spec " + group);
            }
        } else {
            isRangeWindows[i] = true;
            if (isUnboundedWindow(group)) {
                lowerBoundary.add(Long.MIN_VALUE);
                upperBoundary.add(Long.MAX_VALUE);
            } else if (isUnboundedPrecedingWindow(group)) {
                lowerBoundary.add(Long.MIN_VALUE);
                upperBoundary.add(OverAggregateUtil.getLongBoundary(overSpec, group.getUpperBound()));
            } else if (isUnboundedFollowingWindow(group)) {
                lowerBoundary.add(OverAggregateUtil.getLongBoundary(overSpec, group.getLowerBound()));
                upperBoundary.add(Long.MAX_VALUE);
            } else if (isSlidingWindow(group)) {
                lowerBoundary.add(OverAggregateUtil.getLongBoundary(overSpec, group.getLowerBound()));
                upperBoundary.add(OverAggregateUtil.getLongBoundary(overSpec, group.getUpperBound()));
            } else {
                throw new TableException("Unsupported range window group spec " + group);
            }
        }
    }
    Configuration pythonConfig =
            CommonPythonUtil.getMergedConfig(planner.getExecEnv(), config.getTableConfig());
    OneInputTransformation<RowData, RowData> transform =
            createPythonOneInputTransformation(
                    inputTransform,
                    inputType,
                    InternalTypeInfo.of(getOutputType()).toRowType(),
                    isRangeWindows,
                    pythonConfig,
                    config);
    if (CommonPythonUtil.isPythonWorkerUsingManagedMemory(pythonConfig)) {
        transform.declareManagedMemoryUseCaseAtSlotScope(ManagedMemoryUseCase.PYTHON);
    }
    return transform;
}
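The snippet above fills several collections (aggCalls, aggWindowIndex, lowerBoundary, upperBoundary) that are declared elsewhere in BatchExecPythonOverAggregate. A minimal sketch of the assumed declarations, so the snippet reads as self-contained:

// Assumed field declarations (not part of the snippet above): the boundaries are
// collected per over-window group and later flattened into long[]/int[] arrays when
// the Python operator is instantiated; Long.MIN_VALUE/MAX_VALUE act as "unbounded" sentinels.
private final List<AggregateCall> aggCalls = new ArrayList<>();
private final List<Integer> aggWindowIndex = new ArrayList<>();
private final List<Long> lowerBoundary = new ArrayList<>();
private final List<Long> upperBoundary = new ArrayList<>();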
Use of org.apache.flink.table.types.logical.RowType in project flink by apache.
Class BatchExecPythonOverAggregate, method getPythonOverWindowAggregateFunctionOperator:
@SuppressWarnings("unchecked")
private OneInputStreamOperator<RowData, RowData> getPythonOverWindowAggregateFunctionOperator(
        ExecNodeConfig config, Configuration pythonConfig,
        RowType inputRowType, RowType outputRowType, boolean[] isRangeWindows,
        int[] udafInputOffsets, PythonFunctionInfo[] pythonFunctionInfos) {
    Class<?> clazz =
            CommonPythonUtil.loadClass(ARROW_PYTHON_OVER_WINDOW_AGGREGATE_FUNCTION_OPERATOR_NAME);
    RowType udfInputType = (RowType) Projection.of(udafInputOffsets).project(inputRowType);
    RowType udfOutputType =
            (RowType) Projection.range(inputRowType.getFieldCount(), outputRowType.getFieldCount())
                    .project(outputRowType);
    PartitionSpec partitionSpec = overSpec.getPartition();
    List<OverSpec.GroupSpec> groups = overSpec.getGroups();
    SortSpec sortSpec = groups.get(groups.size() - 1).getSort();
    try {
        Constructor<?> ctor =
                clazz.getConstructor(
                        Configuration.class, PythonFunctionInfo[].class,
                        RowType.class, RowType.class, RowType.class,
                        long[].class, long[].class, boolean[].class, int[].class,
                        int.class, boolean.class,
                        GeneratedProjection.class, GeneratedProjection.class, GeneratedProjection.class);
        return (OneInputStreamOperator<RowData, RowData>)
                ctor.newInstance(
                        pythonConfig, pythonFunctionInfos,
                        inputRowType, udfInputType, udfOutputType,
                        lowerBoundary.stream().mapToLong(i -> i).toArray(),
                        upperBoundary.stream().mapToLong(i -> i).toArray(),
                        isRangeWindows,
                        aggWindowIndex.stream().mapToInt(i -> i).toArray(),
                        sortSpec.getFieldIndices()[0],
                        sortSpec.getAscendingOrders()[0],
                        ProjectionCodeGenerator.generateProjection(
                                CodeGeneratorContext.apply(config.getTableConfig()),
                                "UdafInputProjection", inputRowType, udfInputType, udafInputOffsets),
                        ProjectionCodeGenerator.generateProjection(
                                CodeGeneratorContext.apply(config.getTableConfig()),
                                "GroupKey", inputRowType,
                                (RowType) Projection.of(partitionSpec.getFieldIndices()).project(inputRowType),
                                partitionSpec.getFieldIndices()),
                        ProjectionCodeGenerator.generateProjection(
                                CodeGeneratorContext.apply(config.getTableConfig()),
                                "GroupSet", inputRowType,
                                (RowType) Projection.of(partitionSpec.getFieldIndices()).project(inputRowType),
                                partitionSpec.getFieldIndices()));
    } catch (NoSuchMethodException | InstantiationException
            | IllegalAccessException | InvocationTargetException e) {
        throw new TableException(
                "Python BatchArrowPythonOverWindowAggregateFunctionOperator constructed failed.", e);
    }
}
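Projection.of(...) and Projection.range(...) used above come from org.apache.flink.table.connector.Projection. A minimal standalone sketch of how they derive a projected RowType from field indices; the input schema is illustrative only:

// Hypothetical three-field input row, used only for illustration.
RowType input =
        RowType.of(
                new LogicalType[] {new IntType(), new BigIntType(), new VarCharType(10)},
                new String[] {"a", "b", "c"});
// keep only the fields at the given indices, in the given order
RowType byIndices = (RowType) Projection.of(new int[] {2, 0}).project(input);
// keep the contiguous field index range [1, 3)
RowType byRange = (RowType) Projection.range(1, 3).project(input);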
Use of org.apache.flink.table.types.logical.RowType in project flink by apache.
Class CommonExecLegacySink, method translateToTransformation:
/**
* Translates {@link TableSink} into a {@link Transformation}.
*
* @param withChangeFlag Set to true to emit records with change flags.
* @return The {@link Transformation} that corresponds to the translated {@link TableSink}.
*/
@SuppressWarnings("unchecked")
private Transformation<T> translateToTransformation(
        PlannerBase planner, ExecNodeConfig config, boolean withChangeFlag) {
    // if no change flags are requested, verify table is an insert-only (append-only) table.
    if (!withChangeFlag && needRetraction) {
        throw new TableException(
                "Table is not an append-only table. "
                        + "Use the toRetractStream() in order to handle add and retract messages.");
    }
    final ExecEdge inputEdge = getInputEdges().get(0);
    final Transformation<RowData> inputTransform =
            (Transformation<RowData>) inputEdge.translateToPlan(planner);
    final RowType inputRowType = (RowType) inputEdge.getOutputType();
    final RowType convertedInputRowType = checkAndConvertInputTypeIfNeeded(inputRowType);
    final DataType resultDataType = tableSink.getConsumedDataType();
    if (CodeGenUtils.isInternalClass(resultDataType)) {
        return (Transformation<T>) inputTransform;
    } else {
        final int rowtimeIndex = getRowtimeIndex(inputRowType);
        final DataType physicalOutputType =
                TableSinkUtils.inferSinkPhysicalDataType(
                        resultDataType, convertedInputRowType, withChangeFlag);
        final TypeInformation<T> outputTypeInfo =
                SinkCodeGenerator.deriveSinkOutputTypeInfo(
                        tableSink, physicalOutputType, withChangeFlag);
        final CodeGenOperatorFactory<T> converterOperator =
                SinkCodeGenerator.generateRowConverterOperator(
                        new CodeGeneratorContext(config.getTableConfig()),
                        convertedInputRowType, tableSink, physicalOutputType,
                        withChangeFlag, "SinkConversion", rowtimeIndex);
        final String description =
                "SinkConversion To " + resultDataType.getConversionClass().getSimpleName();
        return ExecNodeUtil.createOneInputTransformation(
                inputTransform,
                createFormattedTransformationName(description, "SinkConversion", config),
                createFormattedTransformationDescription(description, config),
                converterOperator, outputTypeInfo, inputTransform.getParallelism());
    }
}
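At the API level, this legacy sink translation is reached when a Table is converted to a DataStream with the old conversion methods, where withChangeFlag corresponds to the retract versus append variant. A minimal usage sketch; env is assumed to be an existing StreamExecutionEnvironment, and the table name and query are illustrative only:

StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
// append-only query used only for illustration
Table result = tEnv.sqlQuery("SELECT user_id, url FROM clicks WHERE url LIKE '%/product%'");
// withChangeFlag = true: each record carries an add/retract flag
DataStream<Tuple2<Boolean, Row>> retractStream = tEnv.toRetractStream(result, Row.class);
// withChangeFlag = false: only valid for append-only tables; for updating results
// (e.g. GROUP BY aggregations) the TableException shown above is thrown instead
DataStream<Row> appendStream = tEnv.toAppendStream(result, Row.class);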
Use of org.apache.flink.table.types.logical.RowType in project flink by apache.
Class CommonExecLookupJoin, method createAsyncLookupJoin:
@SuppressWarnings("unchecked")
private StreamOperatorFactory<RowData> createAsyncLookupJoin(
        RelOptTable temporalTable, ExecNodeConfig config,
        Map<Integer, LookupJoinUtil.LookupKey> allLookupKeys,
        AsyncTableFunction<Object> asyncLookupFunction, RelBuilder relBuilder,
        RowType inputRowType, RowType tableSourceRowType, RowType resultRowType,
        boolean isLeftOuterJoin) {
    int asyncBufferCapacity =
            config.get(ExecutionConfigOptions.TABLE_EXEC_ASYNC_LOOKUP_BUFFER_CAPACITY);
    long asyncTimeout =
            config.get(ExecutionConfigOptions.TABLE_EXEC_ASYNC_LOOKUP_TIMEOUT).toMillis();
    DataTypeFactory dataTypeFactory =
            ShortcutUtils.unwrapContext(relBuilder).getCatalogManager().getDataTypeFactory();
    LookupJoinCodeGenerator.GeneratedTableFunctionWithDataType<AsyncFunction<RowData, Object>>
            generatedFuncWithType =
                    LookupJoinCodeGenerator.generateAsyncLookupFunction(
                            config.getTableConfig(), dataTypeFactory,
                            inputRowType, tableSourceRowType, resultRowType,
                            allLookupKeys,
                            LookupJoinUtil.getOrderedLookupKeys(allLookupKeys.keySet()),
                            asyncLookupFunction,
                            StringUtils.join(temporalTable.getQualifiedName(), "."));
    RowType rightRowType =
            Optional.ofNullable(temporalTableOutputType)
                    .map(FlinkTypeFactory::toLogicalRowType)
                    .orElse(tableSourceRowType);
    GeneratedResultFuture<TableFunctionResultFuture<RowData>> generatedResultFuture =
            LookupJoinCodeGenerator.generateTableAsyncCollector(
                    config.getTableConfig(), "TableFunctionResultFuture",
                    inputRowType, rightRowType,
                    JavaScalaConversionUtil.toScala(Optional.ofNullable(joinCondition)));
    DataStructureConverter<?, ?> fetcherConverter =
            DataStructureConverters.getConverter(generatedFuncWithType.dataType());
    AsyncFunction<RowData, RowData> asyncFunc;
    if (existCalcOnTemporalTable) {
        // a projection or filter is applied after the table source scan
        GeneratedFunction<FlatMapFunction<RowData, RowData>> generatedCalc =
                LookupJoinCodeGenerator.generateCalcMapFunction(
                        config.getTableConfig(),
                        JavaScalaConversionUtil.toScala(projectionOnTemporalTable),
                        filterOnTemporalTable, temporalTableOutputType, tableSourceRowType);
        asyncFunc =
                new AsyncLookupJoinWithCalcRunner(
                        generatedFuncWithType.tableFunc(),
                        (DataStructureConverter<RowData, Object>) fetcherConverter,
                        generatedCalc, generatedResultFuture,
                        InternalSerializers.create(rightRowType),
                        isLeftOuterJoin, asyncBufferCapacity);
    } else {
        // the right type equals the table source row type because there is no calc after the temporal table
        asyncFunc =
                new AsyncLookupJoinRunner(
                        generatedFuncWithType.tableFunc(),
                        (DataStructureConverter<RowData, Object>) fetcherConverter,
                        generatedResultFuture,
                        InternalSerializers.create(rightRowType),
                        isLeftOuterJoin, asyncBufferCapacity);
    }
    // ORDERED output mode is used even when the downstream does not need the results in order
    return new AsyncWaitOperatorFactory<>(
            asyncFunc, asyncTimeout, asyncBufferCapacity, AsyncDataStream.OutputMode.ORDERED);
}
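The two options read at the top of the method are defined in ExecutionConfigOptions. A minimal sketch of setting them on a TableConfig; the concrete values are illustrative only:

TableConfig tableConfig = TableConfig.getDefault();
// maximum number of in-flight async lookup requests buffered by the async operator
tableConfig.getConfiguration()
        .set(ExecutionConfigOptions.TABLE_EXEC_ASYNC_LOOKUP_BUFFER_CAPACITY, 100);
// timeout for a single asynchronous lookup to complete
tableConfig.getConfiguration()
        .set(ExecutionConfigOptions.TABLE_EXEC_ASYNC_LOOKUP_TIMEOUT, Duration.ofSeconds(30));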