Example 51 with ExecEdge

Use of org.apache.flink.table.planner.plan.nodes.exec.ExecEdge in project flink by apache.

The class StreamExecChangelogNormalize, method translateToPlanInternal.

@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    final ExecEdge inputEdge = getInputEdges().get(0);
    final Transformation<RowData> inputTransform = (Transformation<RowData>) inputEdge.translateToPlan(planner);
    final InternalTypeInfo<RowData> rowTypeInfo = (InternalTypeInfo<RowData>) inputTransform.getOutputType();
    final OneInputStreamOperator<RowData, RowData> operator;
    final long stateIdleTime = config.getStateRetentionTime();
    final boolean isMiniBatchEnabled = config.get(ExecutionConfigOptions.TABLE_EXEC_MINIBATCH_ENABLED);
    GeneratedRecordEqualiser generatedEqualiser = new EqualiserCodeGenerator(rowTypeInfo.toRowType()).generateRecordEqualiser("DeduplicateRowEqualiser");
    if (isMiniBatchEnabled) {
        TypeSerializer<RowData> rowSerializer = rowTypeInfo.createSerializer(planner.getExecEnv().getConfig());
        ProcTimeMiniBatchDeduplicateKeepLastRowFunction processFunction =
                new ProcTimeMiniBatchDeduplicateKeepLastRowFunction(
                        rowTypeInfo,
                        rowSerializer,
                        stateIdleTime,
                        generateUpdateBefore,
                        true, // generateInsert
                        false, // inputInsertOnly
                        generatedEqualiser);
        CountBundleTrigger<RowData> trigger = AggregateUtil.createMiniBatchTrigger(config);
        operator = new KeyedMapBundleOperator<>(processFunction, trigger);
    } else {
        ProcTimeDeduplicateKeepLastRowFunction processFunction =
                new ProcTimeDeduplicateKeepLastRowFunction(
                        rowTypeInfo,
                        stateIdleTime,
                        generateUpdateBefore,
                        true, // generateInsert
                        false, // inputInsertOnly
                        generatedEqualiser);
        operator = new KeyedProcessOperator<>(processFunction);
    }
    final OneInputTransformation<RowData, RowData> transform =
            ExecNodeUtil.createOneInputTransformation(
                    inputTransform,
                    createTransformationMeta(CHANGELOG_NORMALIZE_TRANSFORMATION, config),
                    operator,
                    rowTypeInfo,
                    inputTransform.getParallelism());
    final RowDataKeySelector selector = KeySelectorUtil.getRowDataSelector(uniqueKeys, rowTypeInfo);
    transform.setStateKeySelector(selector);
    transform.setStateKeyType(selector.getProducedType());
    return transform;
}
Also used : OneInputTransformation(org.apache.flink.streaming.api.transformations.OneInputTransformation) Transformation(org.apache.flink.api.dag.Transformation) ExecEdge(org.apache.flink.table.planner.plan.nodes.exec.ExecEdge) InternalTypeInfo(org.apache.flink.table.runtime.typeutils.InternalTypeInfo) EqualiserCodeGenerator(org.apache.flink.table.planner.codegen.EqualiserCodeGenerator) GeneratedRecordEqualiser(org.apache.flink.table.runtime.generated.GeneratedRecordEqualiser) RowData(org.apache.flink.table.data.RowData) ProcTimeMiniBatchDeduplicateKeepLastRowFunction(org.apache.flink.table.runtime.operators.deduplicate.ProcTimeMiniBatchDeduplicateKeepLastRowFunction) RowDataKeySelector(org.apache.flink.table.runtime.keyselector.RowDataKeySelector) ProcTimeDeduplicateKeepLastRowFunction(org.apache.flink.table.runtime.operators.deduplicate.ProcTimeDeduplicateKeepLastRowFunction)
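
The skeleton above recurs throughout these examples: a single-input exec node fetches its first ExecEdge, asks the edge to translate the upstream node into a Transformation, and then stacks its own operator on top. A minimal sketch of that pattern follows; the surrounding node class and the placeholder return value are hypothetical, but the ExecEdge calls (getInputEdges, translateToPlan, getOutputType) are exactly the ones used in Examples 51-55.

// Hypothetical single-input node body illustrating the recurring pattern.
// Only the ExecEdge API calls are taken from the examples above.
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    // Single-input nodes read edge 0; multi-input nodes would iterate.
    final ExecEdge inputEdge = getInputEdges().get(0);
    // Recursively translate the upstream node into a physical Transformation.
    final Transformation<RowData> inputTransform =
            (Transformation<RowData>) inputEdge.translateToPlan(planner);
    // The edge also knows the logical row type flowing across it.
    final RowType inputRowType = (RowType) inputEdge.getOutputType();
    // ... build this node's operator over inputTransform using inputRowType ...
    return inputTransform; // placeholder; a real node returns its own transformation
}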

Example 52 with ExecEdge

Use of org.apache.flink.table.planner.plan.nodes.exec.ExecEdge in project flink by apache.

The class InputPriorityConflictResolverTest, method testDeadlockCausedByExchange.

@Test
public void testDeadlockCausedByExchange() {
    // P1 = PIPELINED + priority 1
    //
    // 0 -(P0)-> exchange --(P0)-> 1
    //                    \-(P1)-/
    TestingBatchExecNode[] nodes = new TestingBatchExecNode[2];
    for (int i = 0; i < nodes.length; i++) {
        nodes[i] = new TestingBatchExecNode("TestingBatchExecNode" + i);
    }
    BatchExecExchange exchange =
            new BatchExecExchange(
                    InputProperty.builder().requiredDistribution(InputProperty.ANY_DISTRIBUTION).build(),
                    (RowType) nodes[0].getOutputType(),
                    "Exchange");
    exchange.setRequiredExchangeMode(StreamExchangeMode.BATCH);
    ExecEdge execEdge = ExecEdge.builder().source(nodes[0]).target(exchange).build();
    exchange.setInputEdges(Collections.singletonList(execEdge));
    nodes[1].addInput(exchange, InputProperty.builder().priority(0).build());
    nodes[1].addInput(exchange, InputProperty.builder().priority(1).build());
    InputPriorityConflictResolver resolver =
            new InputPriorityConflictResolver(
                    Collections.singletonList(nodes[1]),
                    InputProperty.DamBehavior.END_INPUT,
                    StreamExchangeMode.BATCH,
                    new Configuration());
    resolver.detectAndResolve();
    ExecNode<?> input0 = nodes[1].getInputNodes().get(0);
    ExecNode<?> input1 = nodes[1].getInputNodes().get(1);
    Assert.assertNotSame(input0, input1);
    Consumer<ExecNode<?>> checkExchange = execNode -> {
        Assert.assertTrue(execNode instanceof BatchExecExchange);
        BatchExecExchange e = (BatchExecExchange) execNode;
        Assert.assertEquals(Optional.of(StreamExchangeMode.BATCH), e.getRequiredExchangeMode());
        Assert.assertEquals(nodes[0], e.getInputEdges().get(0).getSource());
    };
    checkExchange.accept(input0);
    checkExchange.accept(input1);
}
Also used : InputProperty(org.apache.flink.table.planner.plan.nodes.exec.InputProperty) Configuration(org.apache.flink.configuration.Configuration) TestingBatchExecNode(org.apache.flink.table.planner.plan.nodes.exec.TestingBatchExecNode) Test(org.junit.Test) RowType(org.apache.flink.table.types.logical.RowType) ExecNode(org.apache.flink.table.planner.plan.nodes.exec.ExecNode) Consumer(java.util.function.Consumer) StreamExchangeMode(org.apache.flink.streaming.api.transformations.StreamExchangeMode) ExecEdge(org.apache.flink.table.planner.plan.nodes.exec.ExecEdge) BatchExecExchange(org.apache.flink.table.planner.plan.nodes.exec.batch.BatchExecExchange) Optional(java.util.Optional) Assert(org.junit.Assert) Collections(java.util.Collections)
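
The interesting part of this test is what detectAndResolve() does: node 1 reads the same BATCH exchange twice, with priorities 0 and 1, which would deadlock; after resolution node 1 is left with two distinct exchange instances (hence assertNotSame), each still BATCH-mode and still fed by node 0. The manual edge wiring at the heart of the setup, condensed from the test with inline comments added:

    // Condensed from the test above: node 0 feeds the exchange, and the
    // edge between them is created explicitly through ExecEdge's builder.
    ExecEdge execEdge = ExecEdge.builder()
            .source(nodes[0]) // producing node
            .target(exchange) // consuming node
            .build();
    // The consuming node then registers the edge as its input.
    exchange.setInputEdges(Collections.singletonList(execEdge));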

Example 53 with ExecEdge

Use of org.apache.flink.table.planner.plan.nodes.exec.ExecEdge in project flink by apache.

The class StreamExecPythonGroupAggregate, method translateToPlanInternal.

@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    if (grouping.length > 0 && config.getStateRetentionTime() < 0) {
        LOG.warn(
                "No state retention interval configured for a query which accumulates state. "
                        + "Please provide a query configuration with valid retention interval "
                        + "to prevent excessive state size. You may specify a retention time "
                        + "of 0 to not clean up the state.");
    }
    final ExecEdge inputEdge = getInputEdges().get(0);
    final Transformation<RowData> inputTransform = (Transformation<RowData>) inputEdge.translateToPlan(planner);
    final RowType inputRowType = (RowType) inputEdge.getOutputType();
    final AggregateInfoList aggInfoList =
            AggregateUtil.transformToStreamAggregateInfoList(
                    inputRowType,
                    JavaScalaConversionUtil.toScala(Arrays.asList(aggCalls)),
                    aggCallNeedRetractions,
                    needRetraction,
                    true, // isStateBackendDataViews
                    true); // needDistinctInfo
    final int inputCountIndex = aggInfoList.getIndexOfCountStar();
    final boolean countStarInserted = aggInfoList.countStarInserted();
    Tuple2<PythonAggregateFunctionInfo[], DataViewSpec[][]> aggInfosAndDataViewSpecs = CommonPythonUtil.extractPythonAggregateFunctionInfos(aggInfoList, aggCalls);
    PythonAggregateFunctionInfo[] pythonFunctionInfos = aggInfosAndDataViewSpecs.f0;
    DataViewSpec[][] dataViewSpecs = aggInfosAndDataViewSpecs.f1;
    Configuration pythonConfig = CommonPythonUtil.getMergedConfig(planner.getExecEnv(), config.getTableConfig());
    final OneInputStreamOperator<RowData, RowData> operator =
            getPythonAggregateFunctionOperator(
                    pythonConfig,
                    inputRowType,
                    InternalTypeInfo.of(getOutputType()).toRowType(),
                    pythonFunctionInfos,
                    dataViewSpecs,
                    config.getStateRetentionTime(),
                    config.getMaxIdleStateRetentionTime(),
                    inputCountIndex,
                    countStarInserted);
    // partitioned aggregation
    OneInputTransformation<RowData, RowData> transform =
            ExecNodeUtil.createOneInputTransformation(
                    inputTransform,
                    createTransformationName(config),
                    createTransformationDescription(config),
                    operator,
                    InternalTypeInfo.of(getOutputType()),
                    inputTransform.getParallelism());
    if (CommonPythonUtil.isPythonWorkerUsingManagedMemory(pythonConfig)) {
        transform.declareManagedMemoryUseCaseAtSlotScope(ManagedMemoryUseCase.PYTHON);
    }
    // set KeyType and Selector for state
    final RowDataKeySelector selector = KeySelectorUtil.getRowDataSelector(grouping, InternalTypeInfo.of(inputRowType));
    transform.setStateKeySelector(selector);
    transform.setStateKeyType(selector.getProducedType());
    return transform;
}
Also used : OneInputTransformation(org.apache.flink.streaming.api.transformations.OneInputTransformation) Transformation(org.apache.flink.api.dag.Transformation) AggregateInfoList(org.apache.flink.table.planner.plan.utils.AggregateInfoList) ExecEdge(org.apache.flink.table.planner.plan.nodes.exec.ExecEdge) DataViewSpec(org.apache.flink.table.runtime.dataview.DataViewSpec) Configuration(org.apache.flink.configuration.Configuration) RowType(org.apache.flink.table.types.logical.RowType) RowData(org.apache.flink.table.data.RowData) PythonAggregateFunctionInfo(org.apache.flink.table.functions.python.PythonAggregateFunctionInfo) RowDataKeySelector(org.apache.flink.table.runtime.keyselector.RowDataKeySelector)
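
Note that this example reads the input row type from the edge, while Example 51 recovered it from the translated transformation's type information. Both variants appear in this set and yield the node's input row type; a side-by-side sketch, assuming the inputEdge and inputTransform variables from the pattern above:

    // (a) via the physical Transformation, as in Example 51:
    final InternalTypeInfo<RowData> rowTypeInfo =
            (InternalTypeInfo<RowData>) inputTransform.getOutputType();
    final RowType fromTransform = rowTypeInfo.toRowType();

    // (b) via the logical ExecEdge, as in Examples 53-55:
    final RowType fromEdge = (RowType) inputEdge.getOutputType();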

Example 54 with ExecEdge

Use of org.apache.flink.table.planner.plan.nodes.exec.ExecEdge in project flink by apache.

The class StreamExecPythonGroupWindowAggregate, method translateToPlanInternal.

@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    final boolean isCountWindow;
    if (window instanceof TumblingGroupWindow) {
        isCountWindow = hasRowIntervalType(((TumblingGroupWindow) window).size());
    } else if (window instanceof SlidingGroupWindow) {
        isCountWindow = hasRowIntervalType(((SlidingGroupWindow) window).size());
    } else {
        isCountWindow = false;
    }
    if (isCountWindow && grouping.length > 0 && config.getStateRetentionTime() < 0) {
        LOGGER.warn(
                "No state retention interval configured for a query which accumulates state."
                        + " Please provide a query configuration with valid retention interval to"
                        + " prevent excessive state size. You may specify a retention time of 0 to"
                        + " not clean up the state.");
    }
    final ExecEdge inputEdge = getInputEdges().get(0);
    final Transformation<RowData> inputTransform = (Transformation<RowData>) inputEdge.translateToPlan(planner);
    final RowType inputRowType = (RowType) inputEdge.getOutputType();
    final RowType outputRowType = InternalTypeInfo.of(getOutputType()).toRowType();
    final int inputTimeFieldIndex;
    if (isRowtimeAttribute(window.timeAttribute())) {
        inputTimeFieldIndex = timeFieldIndex(FlinkTypeFactory.INSTANCE().buildRelNodeRowType(inputRowType), planner.getRelBuilder(), window.timeAttribute());
        if (inputTimeFieldIndex < 0) {
            throw new TableException("Group window must defined on a time attribute, " + "but the time attribute can't be found.\n" + "This should never happen. Please file an issue.");
        }
    } else {
        inputTimeFieldIndex = -1;
    }
    final ZoneId shiftTimeZone = TimeWindowUtil.getShiftTimeZone(window.timeAttribute().getOutputDataType().getLogicalType(), config.getLocalTimeZone());
    Tuple2<WindowAssigner<?>, Trigger<?>> windowAssignerAndTrigger = generateWindowAssignerAndTrigger();
    WindowAssigner<?> windowAssigner = windowAssignerAndTrigger.f0;
    Trigger<?> trigger = windowAssignerAndTrigger.f1;
    Configuration pythonConfig = CommonPythonUtil.getMergedConfig(planner.getExecEnv(), config.getTableConfig());
    boolean isGeneralPythonUDAF = Arrays.stream(aggCalls).anyMatch(x -> PythonUtil.isPythonAggregate(x, PythonFunctionKind.GENERAL));
    OneInputTransformation<RowData, RowData> transform;
    WindowEmitStrategy emitStrategy = WindowEmitStrategy.apply(config, window);
    if (isGeneralPythonUDAF) {
        final boolean[] aggCallNeedRetractions = new boolean[aggCalls.length];
        Arrays.fill(aggCallNeedRetractions, needRetraction);
        final AggregateInfoList aggInfoList =
                transformToStreamAggregateInfoList(
                        inputRowType,
                        JavaScalaConversionUtil.toScala(Arrays.asList(aggCalls)),
                        aggCallNeedRetractions,
                        needRetraction,
                        true, // isStateBackendDataViews
                        true); // needDistinctInfo
        transform =
                createGeneralPythonStreamWindowGroupOneInputTransformation(
                        inputTransform,
                        inputRowType,
                        outputRowType,
                        inputTimeFieldIndex,
                        windowAssigner,
                        aggInfoList,
                        emitStrategy.getAllowLateness(),
                        pythonConfig,
                        shiftTimeZone);
    } else {
        transform =
                createPandasPythonStreamWindowGroupOneInputTransformation(
                        inputTransform,
                        inputRowType,
                        outputRowType,
                        inputTimeFieldIndex,
                        windowAssigner,
                        trigger,
                        emitStrategy.getAllowLateness(),
                        pythonConfig,
                        config,
                        shiftTimeZone);
    }
    if (CommonPythonUtil.isPythonWorkerUsingManagedMemory(pythonConfig)) {
        transform.declareManagedMemoryUseCaseAtSlotScope(ManagedMemoryUseCase.PYTHON);
    }
    // set KeyType and Selector for state
    final RowDataKeySelector selector = KeySelectorUtil.getRowDataSelector(grouping, InternalTypeInfo.of(inputRowType));
    transform.setStateKeySelector(selector);
    transform.setStateKeyType(selector.getProducedType());
    return transform;
}
Also used : OneInputTransformation(org.apache.flink.streaming.api.transformations.OneInputTransformation) Transformation(org.apache.flink.api.dag.Transformation) TableException(org.apache.flink.table.api.TableException) AggregateInfoList(org.apache.flink.table.planner.plan.utils.AggregateInfoList) AggregateUtil.transformToStreamAggregateInfoList(org.apache.flink.table.planner.plan.utils.AggregateUtil.transformToStreamAggregateInfoList) ExecEdge(org.apache.flink.table.planner.plan.nodes.exec.ExecEdge) ZoneId(java.time.ZoneId) Configuration(org.apache.flink.configuration.Configuration) RowType(org.apache.flink.table.types.logical.RowType) RowData(org.apache.flink.table.data.RowData) SlidingWindowAssigner(org.apache.flink.table.runtime.operators.window.assigners.SlidingWindowAssigner) CountTumblingWindowAssigner(org.apache.flink.table.runtime.operators.window.assigners.CountTumblingWindowAssigner) WindowAssigner(org.apache.flink.table.runtime.operators.window.assigners.WindowAssigner) TumblingWindowAssigner(org.apache.flink.table.runtime.operators.window.assigners.TumblingWindowAssigner) CountSlidingWindowAssigner(org.apache.flink.table.runtime.operators.window.assigners.CountSlidingWindowAssigner) SessionWindowAssigner(org.apache.flink.table.runtime.operators.window.assigners.SessionWindowAssigner) TumblingGroupWindow(org.apache.flink.table.planner.plan.logical.TumblingGroupWindow) Trigger(org.apache.flink.table.runtime.operators.window.triggers.Trigger) WindowEmitStrategy(org.apache.flink.table.planner.plan.utils.WindowEmitStrategy) RowDataKeySelector(org.apache.flink.table.runtime.keyselector.RowDataKeySelector) SlidingGroupWindow(org.apache.flink.table.planner.plan.logical.SlidingGroupWindow)
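
Both Python examples (53 and 54) share one step the JVM-only nodes do not need: when the Python worker draws on Flink managed memory, the transformation must declare that use case so the scheduler actually reserves the memory for the slot. The recurring guard, taken verbatim from the examples with comments added:

    // Recurring managed-memory guard from Examples 53 and 54.
    if (CommonPythonUtil.isPythonWorkerUsingManagedMemory(pythonConfig)) {
        // Reserve slot-scoped managed memory for the Python worker process.
        transform.declareManagedMemoryUseCaseAtSlotScope(ManagedMemoryUseCase.PYTHON);
    }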

Example 55 with ExecEdge

Use of org.apache.flink.table.planner.plan.nodes.exec.ExecEdge in project flink by apache.

The class StreamExecRank, method translateToPlanInternal.

@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    switch(rankType) {
        case ROW_NUMBER:
            break;
        case RANK:
            throw new TableException("RANK() on streaming table is not supported currently");
        case DENSE_RANK:
            throw new TableException("DENSE_RANK() on streaming table is not supported currently");
        default:
            throw new TableException(String.format("Streaming tables do not support %s rank function.", rankType));
    }
    ExecEdge inputEdge = getInputEdges().get(0);
    Transformation<RowData> inputTransform = (Transformation<RowData>) inputEdge.translateToPlan(planner);
    RowType inputType = (RowType) inputEdge.getOutputType();
    InternalTypeInfo<RowData> inputRowTypeInfo = InternalTypeInfo.of(inputType);
    int[] sortFields = sortSpec.getFieldIndices();
    RowDataKeySelector sortKeySelector = KeySelectorUtil.getRowDataSelector(sortFields, inputRowTypeInfo);
    // create a sort spec on sort keys.
    int[] sortKeyPositions = IntStream.range(0, sortFields.length).toArray();
    SortSpec.SortSpecBuilder builder = SortSpec.builder();
    IntStream.range(0, sortFields.length)
            .forEach(idx -> builder.addField(
                    idx,
                    sortSpec.getFieldSpec(idx).getIsAscendingOrder(),
                    sortSpec.getFieldSpec(idx).getNullIsLast()));
    SortSpec sortSpecInSortKey = builder.build();
    GeneratedRecordComparator sortKeyComparator =
            ComparatorCodeGenerator.gen(
                    config.getTableConfig(),
                    "StreamExecSortComparator",
                    RowType.of(sortSpec.getFieldTypes(inputType)),
                    sortSpecInSortKey);
    long cacheSize = config.get(TABLE_EXEC_RANK_TOPN_CACHE_SIZE);
    StateTtlConfig ttlConfig = StateConfigUtil.createTtlConfig(config.getStateRetentionTime());
    AbstractTopNFunction processFunction;
    if (rankStrategy instanceof RankProcessStrategy.AppendFastStrategy) {
        if (sortFields.length == 1 && TypeCheckUtils.isProcTime(inputType.getChildren().get(sortFields[0])) && sortSpec.getFieldSpec(0).getIsAscendingOrder()) {
            processFunction = new AppendOnlyFirstNFunction(ttlConfig, inputRowTypeInfo, sortKeyComparator, sortKeySelector, rankType, rankRange, generateUpdateBefore, outputRankNumber);
        } else if (RankUtil.isTop1(rankRange)) {
            processFunction = new FastTop1Function(ttlConfig, inputRowTypeInfo, sortKeyComparator, sortKeySelector, rankType, rankRange, generateUpdateBefore, outputRankNumber, cacheSize);
        } else {
            processFunction = new AppendOnlyTopNFunction(ttlConfig, inputRowTypeInfo, sortKeyComparator, sortKeySelector, rankType, rankRange, generateUpdateBefore, outputRankNumber, cacheSize);
        }
    } else if (rankStrategy instanceof RankProcessStrategy.UpdateFastStrategy) {
        if (RankUtil.isTop1(rankRange)) {
            processFunction = new FastTop1Function(ttlConfig, inputRowTypeInfo, sortKeyComparator, sortKeySelector, rankType, rankRange, generateUpdateBefore, outputRankNumber, cacheSize);
        } else {
            RankProcessStrategy.UpdateFastStrategy updateFastStrategy = (RankProcessStrategy.UpdateFastStrategy) rankStrategy;
            int[] primaryKeys = updateFastStrategy.getPrimaryKeys();
            RowDataKeySelector rowKeySelector = KeySelectorUtil.getRowDataSelector(primaryKeys, inputRowTypeInfo);
            processFunction = new UpdatableTopNFunction(ttlConfig, inputRowTypeInfo, rowKeySelector, sortKeyComparator, sortKeySelector, rankType, rankRange, generateUpdateBefore, outputRankNumber, cacheSize);
        }
    // TODO Use UnaryUpdateTopNFunction after SortedMapState is merged
    } else if (rankStrategy instanceof RankProcessStrategy.RetractStrategy) {
        EqualiserCodeGenerator equaliserCodeGen =
                new EqualiserCodeGenerator(
                        inputType.getFields().stream()
                                .map(RowType.RowField::getType)
                                .toArray(LogicalType[]::new));
        GeneratedRecordEqualiser generatedEqualiser =
                equaliserCodeGen.generateRecordEqualiser("RankValueEqualiser");
        ComparableRecordComparator comparator =
                new ComparableRecordComparator(
                        sortKeyComparator,
                        sortKeyPositions,
                        sortSpec.getFieldTypes(inputType),
                        sortSpec.getAscendingOrders(),
                        sortSpec.getNullsIsLast());
        processFunction =
                new RetractableTopNFunction(
                        ttlConfig,
                        inputRowTypeInfo,
                        comparator,
                        sortKeySelector,
                        rankType,
                        rankRange,
                        generatedEqualiser,
                        generateUpdateBefore,
                        outputRankNumber);
    } else {
        throw new TableException(String.format("rank strategy:%s is not supported.", rankStrategy));
    }
    KeyedProcessOperator<RowData, RowData, RowData> operator = new KeyedProcessOperator<>(processFunction);
    processFunction.setKeyContext(operator);
    OneInputTransformation<RowData, RowData> transform =
            ExecNodeUtil.createOneInputTransformation(
                    inputTransform,
                    createTransformationMeta(RANK_TRANSFORMATION, config),
                    operator,
                    InternalTypeInfo.of((RowType) getOutputType()),
                    inputTransform.getParallelism());
    // set KeyType and Selector for state
    RowDataKeySelector selector = KeySelectorUtil.getRowDataSelector(partitionSpec.getFieldIndices(), inputRowTypeInfo);
    transform.setStateKeySelector(selector);
    transform.setStateKeyType(selector.getProducedType());
    return transform;
}
Also used : OneInputTransformation(org.apache.flink.streaming.api.transformations.OneInputTransformation) Transformation(org.apache.flink.api.dag.Transformation) UpdatableTopNFunction(org.apache.flink.table.runtime.operators.rank.UpdatableTopNFunction) ExecEdge(org.apache.flink.table.planner.plan.nodes.exec.ExecEdge) RowType(org.apache.flink.table.types.logical.RowType) LogicalType(org.apache.flink.table.types.logical.LogicalType) FastTop1Function(org.apache.flink.table.runtime.operators.rank.FastTop1Function) GeneratedRecordEqualiser(org.apache.flink.table.runtime.generated.GeneratedRecordEqualiser) RankProcessStrategy(org.apache.flink.table.planner.plan.utils.RankProcessStrategy) RowData(org.apache.flink.table.data.RowData) AbstractTopNFunction(org.apache.flink.table.runtime.operators.rank.AbstractTopNFunction) RowDataKeySelector(org.apache.flink.table.runtime.keyselector.RowDataKeySelector) KeyedProcessOperator(org.apache.flink.streaming.api.operators.KeyedProcessOperator) TableException(org.apache.flink.table.api.TableException) AppendOnlyTopNFunction(org.apache.flink.table.runtime.operators.rank.AppendOnlyTopNFunction) AppendOnlyFirstNFunction(org.apache.flink.table.runtime.operators.rank.AppendOnlyFirstNFunction) EqualiserCodeGenerator(org.apache.flink.table.planner.codegen.EqualiserCodeGenerator) StateTtlConfig(org.apache.flink.api.common.state.StateTtlConfig) RetractableTopNFunction(org.apache.flink.table.runtime.operators.rank.RetractableTopNFunction) ComparableRecordComparator(org.apache.flink.table.runtime.operators.rank.ComparableRecordComparator) GeneratedRecordComparator(org.apache.flink.table.runtime.generated.GeneratedRecordComparator) SortSpec(org.apache.flink.table.planner.plan.nodes.exec.spec.SortSpec)
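
Every streaming example here ends with the same three lines: a RowDataKeySelector is derived from the node's key fields and installed on the transformation so keyed state is partitioned consistently with the upstream shuffle. Distilled from Examples 51, 53, 54, and 55, where keyFields is a placeholder name standing in for uniqueKeys, grouping, or partitionSpec.getFieldIndices():

    // Recurring state-key wiring; keyFields is a placeholder name.
    final RowDataKeySelector selector =
            KeySelectorUtil.getRowDataSelector(keyFields, inputRowTypeInfo);
    transform.setStateKeySelector(selector);
    transform.setStateKeyType(selector.getProducedType());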

Aggregations

ExecEdge (org.apache.flink.table.planner.plan.nodes.exec.ExecEdge): 65
Transformation (org.apache.flink.api.dag.Transformation): 52
RowData (org.apache.flink.table.data.RowData): 52
RowType (org.apache.flink.table.types.logical.RowType): 42
OneInputTransformation (org.apache.flink.streaming.api.transformations.OneInputTransformation): 25
RowDataKeySelector (org.apache.flink.table.runtime.keyselector.RowDataKeySelector): 24
TableException (org.apache.flink.table.api.TableException): 21
CodeGeneratorContext (org.apache.flink.table.planner.codegen.CodeGeneratorContext): 18
AggregateInfoList (org.apache.flink.table.planner.plan.utils.AggregateInfoList): 17
LogicalType (org.apache.flink.table.types.logical.LogicalType): 14
ArrayList (java.util.ArrayList): 12
InputProperty (org.apache.flink.table.planner.plan.nodes.exec.InputProperty): 11
Configuration (org.apache.flink.configuration.Configuration): 10
ExecNode (org.apache.flink.table.planner.plan.nodes.exec.ExecNode): 10
ZoneId (java.time.ZoneId): 9
List (java.util.List): 5
EqualiserCodeGenerator (org.apache.flink.table.planner.codegen.EqualiserCodeGenerator): 5
AggsHandlerCodeGenerator (org.apache.flink.table.planner.codegen.agg.AggsHandlerCodeGenerator): 5
GeneratedAggsHandleFunction (org.apache.flink.table.runtime.generated.GeneratedAggsHandleFunction): 5
GeneratedJoinCondition (org.apache.flink.table.runtime.generated.GeneratedJoinCondition): 5