Example 1 with CodeGeneratorContext

Use of org.apache.flink.table.planner.codegen.CodeGeneratorContext in project flink by apache.

From the class StreamExecGroupAggregate, the method translateToPlanInternal:

@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    if (grouping.length > 0 && config.getStateRetentionTime() < 0) {
        LOG.warn("No state retention interval configured for a query which accumulates state. " + "Please provide a query configuration with valid retention interval to prevent excessive " + "state size. You may specify a retention time of 0 to not clean up the state.");
    }
    final ExecEdge inputEdge = getInputEdges().get(0);
    final Transformation<RowData> inputTransform = (Transformation<RowData>) inputEdge.translateToPlan(planner);
    final RowType inputRowType = (RowType) inputEdge.getOutputType();
    final AggsHandlerCodeGenerator generator =
            new AggsHandlerCodeGenerator(
                            new CodeGeneratorContext(config.getTableConfig()),
                            planner.getRelBuilder(),
                            JavaScalaConversionUtil.toScala(inputRowType.getChildren()),
                            // TODO: but other operators do not copy this input field.....
                            true) // copyInputField
                    .needAccumulate();
    if (needRetraction) {
        generator.needRetract();
    }
    final AggregateInfoList aggInfoList =
            AggregateUtil.transformToStreamAggregateInfoList(
                    inputRowType,
                    JavaScalaConversionUtil.toScala(Arrays.asList(aggCalls)),
                    aggCallNeedRetractions,
                    needRetraction,
                    true,
                    true);
    final GeneratedAggsHandleFunction aggsHandler = generator.generateAggsHandler("GroupAggsHandler", aggInfoList);
    final LogicalType[] accTypes =
            Arrays.stream(aggInfoList.getAccTypes())
                    .map(LogicalTypeDataTypeConverter::fromDataTypeToLogicalType)
                    .toArray(LogicalType[]::new);
    final LogicalType[] aggValueTypes =
            Arrays.stream(aggInfoList.getActualValueTypes())
                    .map(LogicalTypeDataTypeConverter::fromDataTypeToLogicalType)
                    .toArray(LogicalType[]::new);
    final GeneratedRecordEqualiser recordEqualiser =
            new EqualiserCodeGenerator(aggValueTypes)
                    .generateRecordEqualiser("GroupAggValueEqualiser");
    final int inputCountIndex = aggInfoList.getIndexOfCountStar();
    final boolean isMiniBatchEnabled = config.get(ExecutionConfigOptions.TABLE_EXEC_MINIBATCH_ENABLED);
    final OneInputStreamOperator<RowData, RowData> operator;
    if (isMiniBatchEnabled) {
        MiniBatchGroupAggFunction aggFunction =
                new MiniBatchGroupAggFunction(
                        aggsHandler,
                        recordEqualiser,
                        accTypes,
                        inputRowType,
                        inputCountIndex,
                        generateUpdateBefore,
                        config.getStateRetentionTime());
        operator =
                new KeyedMapBundleOperator<>(
                        aggFunction, AggregateUtil.createMiniBatchTrigger(config));
    } else {
        GroupAggFunction aggFunction =
                new GroupAggFunction(
                        aggsHandler,
                        recordEqualiser,
                        accTypes,
                        inputCountIndex,
                        generateUpdateBefore,
                        config.getStateRetentionTime());
        operator = new KeyedProcessOperator<>(aggFunction);
    }
    // partitioned aggregation
    final OneInputTransformation<RowData, RowData> transform =
            ExecNodeUtil.createOneInputTransformation(
                    inputTransform,
                    createTransformationMeta(GROUP_AGGREGATE_TRANSFORMATION, config),
                    operator,
                    InternalTypeInfo.of(getOutputType()),
                    inputTransform.getParallelism());
    // set KeyType and Selector for state
    final RowDataKeySelector selector = KeySelectorUtil.getRowDataSelector(grouping, InternalTypeInfo.of(inputRowType));
    transform.setStateKeySelector(selector);
    transform.setStateKeyType(selector.getProducedType());
    return transform;
}
Also used:
OneInputTransformation (org.apache.flink.streaming.api.transformations.OneInputTransformation)
Transformation (org.apache.flink.api.dag.Transformation)
AggregateInfoList (org.apache.flink.table.planner.plan.utils.AggregateInfoList)
ExecEdge (org.apache.flink.table.planner.plan.nodes.exec.ExecEdge)
CodeGeneratorContext (org.apache.flink.table.planner.codegen.CodeGeneratorContext)
GroupAggFunction (org.apache.flink.table.runtime.operators.aggregate.GroupAggFunction)
MiniBatchGroupAggFunction (org.apache.flink.table.runtime.operators.aggregate.MiniBatchGroupAggFunction)
RowType (org.apache.flink.table.types.logical.RowType)
AggsHandlerCodeGenerator (org.apache.flink.table.planner.codegen.agg.AggsHandlerCodeGenerator)
LogicalType (org.apache.flink.table.types.logical.LogicalType)
GeneratedAggsHandleFunction (org.apache.flink.table.runtime.generated.GeneratedAggsHandleFunction)
EqualiserCodeGenerator (org.apache.flink.table.planner.codegen.EqualiserCodeGenerator)
GeneratedRecordEqualiser (org.apache.flink.table.runtime.generated.GeneratedRecordEqualiser)
RowData (org.apache.flink.table.data.RowData)
RowDataKeySelector (org.apache.flink.table.runtime.keyselector.RowDataKeySelector)
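The branch taken above is driven purely by configuration: the node reads TABLE_EXEC_MINIBATCH_ENABLED, and the state retention warning fires when no state TTL is set. A minimal sketch (option values are illustrative, not recommendations) of the user-side settings that would select the MiniBatchGroupAggFunction path and silence the warning:

import java.time.Duration;

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.config.ExecutionConfigOptions;

public class MiniBatchConfigSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        // Flips the isMiniBatchEnabled check (TABLE_EXEC_MINIBATCH_ENABLED) to true.
        tEnv.getConfig().set(ExecutionConfigOptions.TABLE_EXEC_MINIBATCH_ENABLED, true);
        // Minibatch also requires a flush latency and a maximum batch size.
        tEnv.getConfig().set(
                ExecutionConfigOptions.TABLE_EXEC_MINIBATCH_ALLOW_LATENCY, Duration.ofSeconds(5));
        tEnv.getConfig().set(ExecutionConfigOptions.TABLE_EXEC_MINIBATCH_SIZE, 5000L);
        // A positive state TTL avoids the "No state retention interval" warning above.
        tEnv.getConfig().set(ExecutionConfigOptions.IDLE_STATE_RETENTION, Duration.ofHours(1));
    }
}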

Example 2 with CodeGeneratorContext

Use of org.apache.flink.table.planner.codegen.CodeGeneratorContext in project flink by apache.

From the class StreamExecGroupWindowAggregate, the method createAggsHandler:

private GeneratedClass<?> createAggsHandler(
        AggregateInfoList aggInfoList,
        ExecNodeConfig config,
        RelBuilder relBuilder,
        List<LogicalType> fieldTypes,
        ZoneId shiftTimeZone) {
    final boolean needMerge;
    final Class<?> windowClass;
    if (window instanceof SlidingGroupWindow) {
        ValueLiteralExpression size = ((SlidingGroupWindow) window).size();
        needMerge = hasTimeIntervalType(size);
        windowClass = hasRowIntervalType(size) ? CountWindow.class : TimeWindow.class;
    } else if (window instanceof TumblingGroupWindow) {
        needMerge = false;
        ValueLiteralExpression size = ((TumblingGroupWindow) window).size();
        windowClass = hasRowIntervalType(size) ? CountWindow.class : TimeWindow.class;
    } else if (window instanceof SessionGroupWindow) {
        needMerge = true;
        windowClass = TimeWindow.class;
    } else {
        throw new TableException("Unsupported window: " + window.toString());
    }
    final AggsHandlerCodeGenerator generator =
            new AggsHandlerCodeGenerator(
                            new CodeGeneratorContext(config.getTableConfig()),
                            relBuilder,
                            JavaScalaConversionUtil.toScala(fieldTypes),
                            false) // copyInputField
                    .needAccumulate();
    if (needMerge) {
        generator.needMerge(0, false, null);
    }
    if (needRetraction) {
        generator.needRetract();
    }
    final List<WindowProperty> windowProperties =
            Arrays.asList(
                    Arrays.stream(namedWindowProperties)
                            .map(NamedWindowProperty::getProperty)
                            .toArray(WindowProperty[]::new));
    final boolean isTableAggregate =
            isTableAggregate(Arrays.asList(aggInfoList.getActualAggregateCalls()));
    if (isTableAggregate) {
        return generator.generateNamespaceTableAggsHandler(
                "GroupingWindowTableAggsHandler",
                aggInfoList,
                JavaScalaConversionUtil.toScala(windowProperties),
                windowClass,
                shiftTimeZone);
    } else {
        return generator.generateNamespaceAggsHandler(
                "GroupingWindowAggsHandler",
                aggInfoList,
                JavaScalaConversionUtil.toScala(windowProperties),
                windowClass,
                shiftTimeZone);
    }
}
Also used:
NamedWindowProperty (org.apache.flink.table.runtime.groupwindow.NamedWindowProperty)
CountWindow (org.apache.flink.table.runtime.operators.window.CountWindow)
TableException (org.apache.flink.table.api.TableException)
ValueLiteralExpression (org.apache.flink.table.expressions.ValueLiteralExpression)
CodeGeneratorContext (org.apache.flink.table.planner.codegen.CodeGeneratorContext)
AggsHandlerCodeGenerator (org.apache.flink.table.planner.codegen.agg.AggsHandlerCodeGenerator)
TimeWindow (org.apache.flink.table.runtime.operators.window.TimeWindow)
WindowProperty (org.apache.flink.table.runtime.groupwindow.WindowProperty)
TumblingGroupWindow (org.apache.flink.table.planner.plan.logical.TumblingGroupWindow)
SlidingGroupWindow (org.apache.flink.table.planner.plan.logical.SlidingGroupWindow)
SessionGroupWindow (org.apache.flink.table.planner.plan.logical.SessionGroupWindow)
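For orientation, a hypothetical query (table and column names are assumed, not from the source) whose plan contains a TumblingGroupWindow: its size is a time interval, so createAggsHandler picks windowClass = TimeWindow with needMerge = false, whereas a SESSION window would force needMerge = true because session windows merge.

import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;

public class TumbleWindowSketch {
    // Assumes a registered table "clicks" with a rowtime attribute "click_time".
    static Table tumblingCounts(TableEnvironment tEnv) {
        return tEnv.sqlQuery(
                "SELECT user_id, COUNT(*) AS cnt "
                        + "FROM clicks "
                        + "GROUP BY user_id, TUMBLE(click_time, INTERVAL '10' MINUTE)");
    }
}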

Example 3 with CodeGeneratorContext

Use of org.apache.flink.table.planner.codegen.CodeGeneratorContext in project flink by apache.

From the class StreamExecMatch, the method translateToPlanInternal:

@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    final ExecEdge inputEdge = getInputEdges().get(0);
    final Transformation<RowData> inputTransform = (Transformation<RowData>) inputEdge.translateToPlan(planner);
    final RowType inputRowType = (RowType) inputEdge.getOutputType();
    checkOrderKeys(inputRowType);
    final EventComparator<RowData> eventComparator = createEventComparator(config, inputRowType);
    final Transformation<RowData> timestampedInputTransform = translateOrder(inputTransform, inputRowType);
    final Tuple2<Pattern<RowData, RowData>, List<String>> cepPatternAndNames =
            translatePattern(
                    matchSpec, config.getTableConfig(), planner.getRelBuilder(), inputRowType);
    final Pattern<RowData, RowData> cepPattern = cepPatternAndNames.f0;
    // TODO remove this once it is supported in CEP library
    if (NFACompiler.canProduceEmptyMatches(cepPattern)) {
        throw new TableException(
                "Patterns that can produce empty matches are not supported. "
                        + "There must be at least one non-optional state.");
    }
    // TODO remove this once it is supported in CEP library
    if (cepPattern.getQuantifier().hasProperty(Quantifier.QuantifierProperty.GREEDY)) {
        throw new TableException(
                "Greedy quantifiers are not allowed as the last element of a Pattern yet. "
                        + "Finish your pattern with either a simple variable or reluctant quantifier.");
    }
    if (matchSpec.isAllRows()) {
        throw new TableException("All rows per match mode is not supported yet.");
    }
    final int[] partitionKeys = matchSpec.getPartition().getFieldIndices();
    final SortSpec.SortFieldSpec timeOrderField = matchSpec.getOrderKeys().getFieldSpec(0);
    final LogicalType timeOrderFieldType = inputRowType.getTypeAt(timeOrderField.getFieldIndex());
    final boolean isProctime = TypeCheckUtils.isProcTime(timeOrderFieldType);
    final InternalTypeInfo<RowData> inputTypeInfo = (InternalTypeInfo<RowData>) inputTransform.getOutputType();
    final TypeSerializer<RowData> inputSerializer = inputTypeInfo.createSerializer(planner.getExecEnv().getConfig());
    final NFACompiler.NFAFactory<RowData> nfaFactory = NFACompiler.compileFactory(cepPattern, false);
    final MatchCodeGenerator generator =
            new MatchCodeGenerator(
                    new CodeGeneratorContext(config.getTableConfig()),
                    planner.getRelBuilder(),
                    false, // nullableInput
                    JavaScalaConversionUtil.toScala(cepPatternAndNames.f1),
                    JavaScalaConversionUtil.toScala(Optional.empty()),
                    CodeGenUtils.DEFAULT_COLLECTOR_TERM());
    generator.bindInput(
            inputRowType,
            CodeGenUtils.DEFAULT_INPUT1_TERM(),
            JavaScalaConversionUtil.toScala(Optional.empty()));
    final PatternProcessFunctionRunner patternProcessFunction =
            generator.generateOneRowPerMatchExpression(
                    (RowType) getOutputType(), partitionKeys, matchSpec.getMeasures());
    final CepOperator<RowData, RowData, RowData> operator =
            new CepOperator<>(
                    inputSerializer,
                    isProctime,
                    nfaFactory,
                    eventComparator,
                    cepPattern.getAfterMatchSkipStrategy(),
                    patternProcessFunction,
                    null);
    final OneInputTransformation<RowData, RowData> transform =
            ExecNodeUtil.createOneInputTransformation(
                    timestampedInputTransform,
                    createTransformationMeta(MATCH_TRANSFORMATION, config),
                    operator,
                    InternalTypeInfo.of(getOutputType()),
                    timestampedInputTransform.getParallelism());
    final RowDataKeySelector selector = KeySelectorUtil.getRowDataSelector(partitionKeys, inputTypeInfo);
    transform.setStateKeySelector(selector);
    transform.setStateKeyType(selector.getProducedType());
    if (inputsContainSingleton()) {
        transform.setParallelism(1);
        transform.setMaxParallelism(1);
    }
    return transform;
}
Also used:
OneInputTransformation (org.apache.flink.streaming.api.transformations.OneInputTransformation)
Transformation (org.apache.flink.api.dag.Transformation)
ExecEdge (org.apache.flink.table.planner.plan.nodes.exec.ExecEdge)
RowType (org.apache.flink.table.types.logical.RowType)
LogicalType (org.apache.flink.table.types.logical.LogicalType)
NFACompiler (org.apache.flink.cep.nfa.compiler.NFACompiler)
RowData (org.apache.flink.table.data.RowData)
RowDataKeySelector (org.apache.flink.table.runtime.keyselector.RowDataKeySelector)
List (java.util.List)
ArrayList (java.util.ArrayList)
Pattern (org.apache.flink.cep.pattern.Pattern)
TableException (org.apache.flink.table.api.TableException)
CodeGeneratorContext (org.apache.flink.table.planner.codegen.CodeGeneratorContext)
InternalTypeInfo (org.apache.flink.table.runtime.typeutils.InternalTypeInfo)
MatchCodeGenerator (org.apache.flink.table.planner.codegen.MatchCodeGenerator)
PatternProcessFunctionRunner (org.apache.flink.table.runtime.operators.match.PatternProcessFunctionRunner)
CepOperator (org.apache.flink.cep.operator.CepOperator)
SortSpec (org.apache.flink.table.planner.plan.nodes.exec.spec.SortSpec)
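The guards above reject patterns that can match empty input, patterns ending in a greedy quantifier, and ALL ROWS PER MATCH mode. A hypothetical MATCH_RECOGNIZE query (table and column names are assumptions) that passes all three checks, since its pattern ends with the simple variable C and it uses ONE ROW PER MATCH:

import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;

public class MatchRecognizeSketch {
    // Assumes a registered table "ticker"(symbol, price, rowtime) with rowtime attribute "rowtime".
    static Table vShape(TableEnvironment tEnv) {
        return tEnv.sqlQuery(
                "SELECT * FROM ticker MATCH_RECOGNIZE ("
                        + "  PARTITION BY symbol"
                        + "  ORDER BY rowtime"
                        + "  MEASURES A.price AS start_price, LAST(B.price) AS bottom_price"
                        + "  ONE ROW PER MATCH"
                        + "  AFTER MATCH SKIP PAST LAST ROW"
                        + "  PATTERN (A B+ C)"
                        + "  DEFINE"
                        + "    B AS B.price < PREV(B.price),"
                        + "    C AS C.price > PREV(C.price)"
                        + ")");
    }
}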

Example 4 with CodeGeneratorContext

Use of org.apache.flink.table.planner.codegen.CodeGeneratorContext in project flink by apache.

From the class StreamExecOverAggregate, the method translateToPlanInternal:

@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    if (overSpec.getGroups().size() > 1) {
        throw new TableException("All aggregates must be computed on the same window.");
    }
    final OverSpec.GroupSpec group = overSpec.getGroups().get(0);
    final int[] orderKeys = group.getSort().getFieldIndices();
    final boolean[] isAscendingOrders = group.getSort().getAscendingOrders();
    if (orderKeys.length != 1 || isAscendingOrders.length != 1) {
        throw new TableException("The window can only be ordered by a single time column.");
    }
    if (!isAscendingOrders[0]) {
        throw new TableException("The window can only be ordered in ASCENDING mode.");
    }
    final int[] partitionKeys = overSpec.getPartition().getFieldIndices();
    if (partitionKeys.length > 0 && config.getStateRetentionTime() < 0) {
        LOG.warn("No state retention interval configured for a query which accumulates state. " + "Please provide a query configuration with valid retention interval to prevent " + "excessive state size. You may specify a retention time of 0 to not clean up the state.");
    }
    final ExecEdge inputEdge = getInputEdges().get(0);
    final Transformation<RowData> inputTransform = (Transformation<RowData>) inputEdge.translateToPlan(planner);
    final RowType inputRowType = (RowType) inputEdge.getOutputType();
    final int orderKey = orderKeys[0];
    final LogicalType orderKeyType = inputRowType.getFields().get(orderKey).getType();
    // check time field && identify window rowtime attribute
    final int rowTimeIdx;
    if (isRowtimeAttribute(orderKeyType)) {
        rowTimeIdx = orderKey;
    } else if (isProctimeAttribute(orderKeyType)) {
        rowTimeIdx = -1;
    } else {
        throw new TableException("OVER windows' ordering in stream mode must be defined on a time attribute.");
    }
    final List<RexLiteral> constants = overSpec.getConstants();
    final List<String> fieldNames = new ArrayList<>(inputRowType.getFieldNames());
    final List<LogicalType> fieldTypes = new ArrayList<>(inputRowType.getChildren());
    for (int i = 0; i < constants.size(); ++i) {
        fieldNames.add("TMP" + i);
        fieldTypes.add(FlinkTypeFactory.toLogicalType(constants.get(i).getType()));
    }
    final RowType aggInputRowType =
            RowType.of(
                    fieldTypes.toArray(new LogicalType[0]), fieldNames.toArray(new String[0]));
    final CodeGeneratorContext ctx = new CodeGeneratorContext(config.getTableConfig());
    final KeyedProcessFunction<RowData, RowData, RowData> overProcessFunction;
    if (group.getLowerBound().isPreceding()
            && group.getLowerBound().isUnbounded()
            && group.getUpperBound().isCurrentRow()) {
        // unbounded OVER window
        overProcessFunction =
                createUnboundedOverProcessFunction(
                        ctx,
                        group.getAggCalls(),
                        constants,
                        aggInputRowType,
                        inputRowType,
                        rowTimeIdx,
                        group.isRows(),
                        config,
                        planner.getRelBuilder());
    } else if (group.getLowerBound().isPreceding()
            && !group.getLowerBound().isUnbounded()
            && group.getUpperBound().isCurrentRow()) {
        final Object boundValue = OverAggregateUtil.getBoundary(overSpec, group.getLowerBound());
        if (boundValue instanceof BigDecimal) {
            throw new TableException(
                    "The bound value is a DECIMAL, which is not supported yet.");
        }
        // bounded OVER window: negate the lower bound to get the preceding offset
        // (+1 for ROWS frames)
        final long precedingOffset = -1 * (long) boundValue + (group.isRows() ? 1 : 0);
        overProcessFunction =
                createBoundedOverProcessFunction(
                        ctx,
                        group.getAggCalls(),
                        constants,
                        aggInputRowType,
                        inputRowType,
                        rowTimeIdx,
                        group.isRows(),
                        precedingOffset,
                        config,
                        planner.getRelBuilder());
    } else {
        throw new TableException("OVER RANGE FOLLOWING windows are not supported yet.");
    }
    final KeyedProcessOperator<RowData, RowData, RowData> operator = new KeyedProcessOperator<>(overProcessFunction);
    OneInputTransformation<RowData, RowData> transform =
            ExecNodeUtil.createOneInputTransformation(
                    inputTransform,
                    createTransformationMeta(OVER_AGGREGATE_TRANSFORMATION, config),
                    operator,
                    InternalTypeInfo.of(getOutputType()),
                    inputTransform.getParallelism());
    // set KeyType and Selector for state
    final RowDataKeySelector selector = KeySelectorUtil.getRowDataSelector(partitionKeys, InternalTypeInfo.of(inputRowType));
    transform.setStateKeySelector(selector);
    transform.setStateKeyType(selector.getProducedType());
    return transform;
}
Also used:
RexLiteral (org.apache.calcite.rex.RexLiteral)
OneInputTransformation (org.apache.flink.streaming.api.transformations.OneInputTransformation)
Transformation (org.apache.flink.api.dag.Transformation)
ExecEdge (org.apache.flink.table.planner.plan.nodes.exec.ExecEdge)
ArrayList (java.util.ArrayList)
RowType (org.apache.flink.table.types.logical.RowType)
LogicalType (org.apache.flink.table.types.logical.LogicalType)
RowData (org.apache.flink.table.data.RowData)
RowDataKeySelector (org.apache.flink.table.runtime.keyselector.RowDataKeySelector)
KeyedProcessOperator (org.apache.flink.streaming.api.operators.KeyedProcessOperator)
TableException (org.apache.flink.table.api.TableException)
CodeGeneratorContext (org.apache.flink.table.planner.codegen.CodeGeneratorContext)
OverSpec (org.apache.flink.table.planner.plan.nodes.exec.spec.OverSpec)
BigDecimal (java.math.BigDecimal)
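A hypothetical query (schema assumed) that takes the unbounded branch above; replacing the frame with "ROWS BETWEEN 10 PRECEDING AND CURRENT ROW" would take the bounded branch instead:

import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;

public class OverWindowSketch {
    // Assumes a registered table "payments"(user_id, amount, rowtime) with rowtime attribute "rowtime".
    static Table runningTotal(TableEnvironment tEnv) {
        return tEnv.sqlQuery(
                "SELECT user_id, SUM(amount) OVER ("
                        + "  PARTITION BY user_id"
                        + "  ORDER BY rowtime"
                        + "  ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS total "
                        + "FROM payments");
    }
}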

Example 5 with CodeGeneratorContext

Use of org.apache.flink.table.planner.codegen.CodeGeneratorContext in project flink by apache.

From the class StreamExecLegacyTableSourceScan, the method createConversionTransformationIfNeeded:

@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> createConversionTransformationIfNeeded(
        StreamExecutionEnvironment streamExecEnv,
        ExecNodeConfig config,
        Transformation<?> sourceTransform,
        @Nullable RexNode rowtimeExpression) {
    final RowType outputType = (RowType) getOutputType();
    final Transformation<RowData> transformation;
    final int[] fieldIndexes = computeIndexMapping(true);
    if (needInternalConversion(fieldIndexes)) {
        final String extractElement, resetElement;
        if (ScanUtil.hasTimeAttributeField(fieldIndexes)) {
            String elementTerm = OperatorCodeGenerator.ELEMENT();
            extractElement = String.format("ctx.%s = %s;", elementTerm, elementTerm);
            resetElement = String.format("ctx.%s = null;", elementTerm);
        } else {
            extractElement = "";
            resetElement = "";
        }
        final CodeGeneratorContext ctx =
                new CodeGeneratorContext(config.getTableConfig())
                        .setOperatorBaseClass(TableStreamOperator.class);
        // The produced type may not carry the correct precision the user defined in the DDL,
        // because it may have been converted from a legacy type. Fix the precision using the
        // logical schema from the DDL, since code generation requires the correct precision
        // of the input fields.
        final DataType fixedProducedDataType =
                TableSourceUtil.fixPrecisionForProducedDataType(tableSource, outputType);
        transformation =
                ScanUtil.convertToInternalRow(
                        ctx,
                        (Transformation<Object>) sourceTransform,
                        fieldIndexes,
                        fixedProducedDataType,
                        outputType,
                        qualifiedName,
                        (detailName, simplifyName) ->
                                createFormattedTransformationName(detailName, simplifyName, config),
                        (description) ->
                                createFormattedTransformationDescription(description, config),
                        JavaScalaConversionUtil.toScala(Optional.ofNullable(rowtimeExpression)),
                        extractElement,
                        resetElement);
    } else {
        transformation = (Transformation<RowData>) sourceTransform;
    }
    final RelDataType relDataType = FlinkTypeFactory.INSTANCE().buildRelNodeRowType(outputType);
    final DataStream<RowData> ingestedTable = new DataStream<>(streamExecEnv, transformation);
    final Optional<RowtimeAttributeDescriptor> rowtimeDesc =
            JavaScalaConversionUtil.toJava(
                    TableSourceUtil.getRowtimeAttributeDescriptor(tableSource, relDataType));
    final DataStream<RowData> withWatermarks =
            rowtimeDesc
                    .map(desc -> {
                        int rowtimeFieldIdx =
                                relDataType.getFieldNames().indexOf(desc.getAttributeName());
                        WatermarkStrategy strategy = desc.getWatermarkStrategy();
                        if (strategy instanceof PeriodicWatermarkAssigner) {
                            PeriodicWatermarkAssignerWrapper watermarkGenerator =
                                    new PeriodicWatermarkAssignerWrapper(
                                            (PeriodicWatermarkAssigner) strategy, rowtimeFieldIdx);
                            return ingestedTable.assignTimestampsAndWatermarks(watermarkGenerator);
                        } else if (strategy instanceof PunctuatedWatermarkAssigner) {
                            PunctuatedWatermarkAssignerWrapper watermarkGenerator =
                                    new PunctuatedWatermarkAssignerWrapper(
                                            (PunctuatedWatermarkAssigner) strategy,
                                            rowtimeFieldIdx,
                                            tableSource.getProducedDataType());
                            return ingestedTable.assignTimestampsAndWatermarks(watermarkGenerator);
                        } else {
                            // watermarks are provided by the underlying DataStream
                            return ingestedTable;
                        }
                    })
                    // no need to generate watermarks if no rowtime attribute is specified
                    .orElse(ingestedTable);
    return withWatermarks.getTransformation();
}
Also used:
TableStreamOperator (org.apache.flink.table.runtime.operators.TableStreamOperator)
DataType (org.apache.flink.table.types.DataType)
TableSourceUtil (org.apache.flink.table.planner.sources.TableSourceUtil)
RowtimeAttributeDescriptor (org.apache.flink.table.sources.RowtimeAttributeDescriptor)
TableSource (org.apache.flink.table.sources.TableSource)
PeriodicWatermarkAssigner (org.apache.flink.table.sources.wmstrategies.PeriodicWatermarkAssigner)
FlinkTypeFactory (org.apache.flink.table.planner.calcite.FlinkTypeFactory)
RowType (org.apache.flink.table.types.logical.RowType)
ExecNode (org.apache.flink.table.planner.plan.nodes.exec.ExecNode)
ScanUtil (org.apache.flink.table.planner.plan.utils.ScanUtil)
RexNode (org.apache.calcite.rex.RexNode)
InputFormat (org.apache.flink.api.common.io.InputFormat)
TypeInformation (org.apache.flink.api.common.typeinfo.TypeInformation)
CodeGeneratorContext (org.apache.flink.table.planner.codegen.CodeGeneratorContext)
Nullable (javax.annotation.Nullable)
RelDataType (org.apache.calcite.rel.type.RelDataType)
ExecNodeContext (org.apache.flink.table.planner.plan.nodes.exec.ExecNodeContext)
RowData (org.apache.flink.table.data.RowData)
InputSplit (org.apache.flink.core.io.InputSplit)
ExecNodeConfig (org.apache.flink.table.planner.plan.nodes.exec.ExecNodeConfig)
WatermarkStrategy (org.apache.flink.table.sources.wmstrategies.WatermarkStrategy)
PunctuatedWatermarkAssigner (org.apache.flink.table.sources.wmstrategies.PunctuatedWatermarkAssigner)
StreamTableSource (org.apache.flink.table.sources.StreamTableSource)
DataStream (org.apache.flink.streaming.api.datastream.DataStream)
OperatorCodeGenerator (org.apache.flink.table.planner.codegen.OperatorCodeGenerator)
CommonExecLegacyTableSourceScan (org.apache.flink.table.planner.plan.nodes.exec.common.CommonExecLegacyTableSourceScan)
List (java.util.List)
PunctuatedWatermarkAssignerWrapper (org.apache.flink.table.runtime.operators.wmassigners.PunctuatedWatermarkAssignerWrapper)
JavaScalaConversionUtil (org.apache.flink.table.planner.utils.JavaScalaConversionUtil)
Optional (java.util.Optional)
Transformation (org.apache.flink.api.dag.Transformation)
PeriodicWatermarkAssignerWrapper (org.apache.flink.table.runtime.operators.wmassigners.PeriodicWatermarkAssignerWrapper)
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment)
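Which wrapper the rowtimeDesc branch applies depends on the WatermarkStrategy carried by the legacy source's RowtimeAttributeDescriptor. A minimal sketch, assuming a rowtime field named "ts" and an illustrative 5-second delay, of a descriptor whose periodic strategy would be wrapped by PeriodicWatermarkAssignerWrapper above:

import java.util.Collections;
import java.util.List;

import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
import org.apache.flink.table.sources.tsextractors.ExistingField;
import org.apache.flink.table.sources.wmstrategies.BoundedOutOfOrderTimestamps;

public class RowtimeDescriptorSketch {
    // Assumes the legacy source exposes a timestamp field named "ts"; a legacy
    // StreamTableSource would return this list from getRowtimeAttributeDescriptors().
    static List<RowtimeAttributeDescriptor> rowtimeDescriptors() {
        return Collections.singletonList(
                new RowtimeAttributeDescriptor(
                        "ts",                                     // rowtime attribute name
                        new ExistingField("ts"),                  // extract timestamps from "ts"
                        new BoundedOutOfOrderTimestamps(5000L))); // periodic, 5 s out-of-orderness
    }
}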

Aggregations

CodeGeneratorContext (org.apache.flink.table.planner.codegen.CodeGeneratorContext): 25
RowData (org.apache.flink.table.data.RowData): 20
Transformation (org.apache.flink.api.dag.Transformation): 19
ExecEdge (org.apache.flink.table.planner.plan.nodes.exec.ExecEdge): 17
RowType (org.apache.flink.table.types.logical.RowType): 17
AggregateInfoList (org.apache.flink.table.planner.plan.utils.AggregateInfoList): 9
AggsHandlerCodeGenerator (org.apache.flink.table.planner.codegen.agg.AggsHandlerCodeGenerator): 8
LogicalType (org.apache.flink.table.types.logical.LogicalType): 7
TableException (org.apache.flink.table.api.TableException): 5
RowDataKeySelector (org.apache.flink.table.runtime.keyselector.RowDataKeySelector): 5
OneInputTransformation (org.apache.flink.streaming.api.transformations.OneInputTransformation): 4
GeneratedAggsHandleFunction (org.apache.flink.table.runtime.generated.GeneratedAggsHandleFunction): 4
ArrayList (java.util.ArrayList): 3
List (java.util.List): 3
OneInputStreamOperator (org.apache.flink.streaming.api.operators.OneInputStreamOperator): 3
GeneratedJoinCondition (org.apache.flink.table.runtime.generated.GeneratedJoinCondition): 3
DataType (org.apache.flink.table.types.DataType): 3
Optional (java.util.Optional): 2
RexNode (org.apache.calcite.rex.RexNode): 2
DataStream (org.apache.flink.streaming.api.datastream.DataStream): 2