
Example 16 with TableException

use of org.apache.flink.table.api.TableException in project flink by apache.

the class StreamExecTemporalSort method translateToPlanInternal.

@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    // time ordering needs to be ascending
    if (sortSpec.getFieldSize() == 0 || !sortSpec.getFieldSpec(0).getIsAscendingOrder()) {
        throw new TableException("Sort: Primary sort order of a streaming table must be ascending on time.\n" + "please re-check sort statement according to the description above");
    }
    ExecEdge inputEdge = getInputEdges().get(0);
    Transformation<RowData> inputTransform = (Transformation<RowData>) inputEdge.translateToPlan(planner);
    RowType inputType = (RowType) inputEdge.getOutputType();
    LogicalType timeType = inputType.getTypeAt(sortSpec.getFieldSpec(0).getFieldIndex());
    if (isRowtimeAttribute(timeType)) {
        return createSortRowTime(inputType, inputTransform, config);
    } else if (isProctimeAttribute(timeType)) {
        return createSortProcTime(inputType, inputTransform, config);
    } else {
        throw new TableException(String.format("Sort: Internal Error\n" + "First field in temporal sort is not a time attribute, %s is given.", timeType));
    }
}
Also used : TableException(org.apache.flink.table.api.TableException) RowData(org.apache.flink.table.data.RowData) OneInputTransformation(org.apache.flink.streaming.api.transformations.OneInputTransformation) Transformation(org.apache.flink.api.dag.Transformation) ExecEdge(org.apache.flink.table.planner.plan.nodes.exec.ExecEdge) RowType(org.apache.flink.table.types.logical.RowType) LogicalType(org.apache.flink.table.types.logical.LogicalType)
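
From user code, this check is what rejects a descending sort on the time attribute of a streaming table. A minimal sketch of how to surface it (the clicks datagen table is a placeholder, and the exact error path and message can vary across planner versions):

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.TableException;

public class TemporalSortSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        tEnv.executeSql(
                "CREATE TABLE clicks (id INT, ts TIMESTAMP(3), WATERMARK FOR ts AS ts) "
                        + "WITH ('connector' = 'datagen')");
        try {
            // Descending order on the rowtime attribute violates the
            // ascending-order check in translateToPlanInternal above.
            tEnv.explainSql("SELECT * FROM clicks ORDER BY ts DESC");
        } catch (TableException e) {
            System.err.println("Planner rejected the sort: " + e.getMessage());
        }
    }
}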

Example 17 with TableException

use of org.apache.flink.table.api.TableException in project flink by apache.

the class FlinkAggregateExpandDistinctAggregatesRule method onMatch.

// ~ Methods ----------------------------------------------------------------
public void onMatch(RelOptRuleCall call) {
    final Aggregate aggregate = call.rel(0);
    if (!AggregateUtil.containsAccurateDistinctCall(aggregate.getAggCallList())) {
        return;
    }
    // The aggregate contains at least one accurate distinct call; mixing it
    // with an approximate distinct call is not supported.
    if (AggregateUtil.containsApproximateDistinctCall(aggregate.getAggCallList())) {
        throw new TableException("There are both Distinct AggCall and Approximate Distinct AggCall in one sql statement, " + "it is not supported yet.\nPlease choose one of them.");
    }
    // Aggregates with multiple grouping sets are expected to be decomposed
    // by DecomposeGroupingSetsRule first. Then this rule expands its distinct aggregates.
    if (aggregate.getGroupSets().size() > 1) {
        return;
    }
    // Find all of the agg expressions. We use a LinkedHashSet to ensure determinism.
    // Find all aggregate calls without distinct
    int nonDistinctAggCallCount = 0;
    // Find all aggregate calls without distinct but ignore MAX, MIN, BIT_AND, BIT_OR
    int nonDistinctAggCallExcludingIgnoredCount = 0;
    int filterCount = 0;
    int unsupportedNonDistinctAggCallCount = 0;
    final Set<Pair<List<Integer>, Integer>> argLists = new LinkedHashSet<>();
    for (AggregateCall aggCall : aggregate.getAggCallList()) {
        if (aggCall.filterArg >= 0) {
            ++filterCount;
        }
        if (!aggCall.isDistinct()) {
            ++nonDistinctAggCallCount;
            final SqlKind aggCallKind = aggCall.getAggregation().getKind();
            // We only support COUNT/SUM/MIN/MAX for the "single" count distinct optimization
            switch(aggCallKind) {
                case COUNT:
                case SUM:
                case SUM0:
                case MIN:
                case MAX:
                    break;
                default:
                    ++unsupportedNonDistinctAggCallCount;
            }
            if (aggCall.getAggregation().getDistinctOptionality() == Optionality.IGNORED) {
                argLists.add(Pair.of(aggCall.getArgList(), aggCall.filterArg));
            } else {
                ++nonDistinctAggCallExcludingIgnoredCount;
            }
        } else {
            argLists.add(Pair.of(aggCall.getArgList(), aggCall.filterArg));
        }
    }
    final int distinctAggCallCount = aggregate.getAggCallList().size() - nonDistinctAggCallCount;
    Preconditions.checkState(argLists.size() > 0, "containsDistinctCall lied");
    // With a single distinct argument list, no non-ignorable non-distinct
    // aggregates, and a simple group, we can still use this promotion.
    if (nonDistinctAggCallExcludingIgnoredCount == 0 && argLists.size() == 1 && aggregate.getGroupType() == Group.SIMPLE) {
        final Pair<List<Integer>, Integer> pair = com.google.common.collect.Iterables.getOnlyElement(argLists);
        final RelBuilder relBuilder = call.builder();
        convertMonopole(relBuilder, aggregate, pair.left, pair.right);
        call.transformTo(relBuilder.build());
        return;
    }
    if (useGroupingSets) {
        rewriteUsingGroupingSets(call, aggregate);
        return;
    }
    // If there is exactly one distinct aggregate, no FILTER clauses, and only
    // COUNT/SUM/MIN/MAX among the non-distinct aggregates, we can generate
    // multi-phase aggregates.
    if (distinctAggCallCount == 1 // one distinct aggregate
            && filterCount == 0 // no filter
            && unsupportedNonDistinctAggCallCount == 0 // sum/min/max/count in non-distinct aggregate
            && nonDistinctAggCallCount > 0) { // one or more non-distinct aggregates
        final RelBuilder relBuilder = call.builder();
        convertSingletonDistinct(relBuilder, aggregate, argLists);
        call.transformTo(relBuilder.build());
        return;
    }
    // Create a list of the expressions which will yield the final result.
    // Initially, the expressions point to the input field.
    final List<RelDataTypeField> aggFields = aggregate.getRowType().getFieldList();
    final List<RexInputRef> refs = new ArrayList<>();
    final List<String> fieldNames = aggregate.getRowType().getFieldNames();
    final ImmutableBitSet groupSet = aggregate.getGroupSet();
    final int groupCount = aggregate.getGroupCount();
    for (int i : Util.range(groupCount)) {
        refs.add(RexInputRef.of(i, aggFields));
    }
    // Aggregate the original relation, including any non-distinct aggregates.
    final List<AggregateCall> newAggCallList = new ArrayList<>();
    int i = -1;
    for (AggregateCall aggCall : aggregate.getAggCallList()) {
        ++i;
        if (aggCall.isDistinct()) {
            refs.add(null);
            continue;
        }
        refs.add(new RexInputRef(groupCount + newAggCallList.size(), aggFields.get(groupCount + i).getType()));
        newAggCallList.add(aggCall);
    }
    // In the case where there are no non-distinct aggregates (regardless of
    // whether there are group bys), there's no need to generate the
    // extra aggregate and join.
    final RelBuilder relBuilder = call.builder();
    relBuilder.push(aggregate.getInput());
    int n = 0;
    if (!newAggCallList.isEmpty()) {
        final RelBuilder.GroupKey groupKey = relBuilder.groupKey(groupSet, aggregate.getGroupSets());
        relBuilder.aggregate(groupKey, newAggCallList);
        ++n;
    }
    // For each distinct set of operands, rewrite the aggregate calls that use it.
    for (Pair<List<Integer>, Integer> argList : argLists) {
        doRewrite(relBuilder, aggregate, n++, argList.left, argList.right, refs);
    }
    relBuilder.project(refs, fieldNames);
    call.transformTo(relBuilder.build());
}
Also used : LinkedHashSet(java.util.LinkedHashSet) TableException(org.apache.flink.table.api.TableException) RelBuilder(org.apache.calcite.tools.RelBuilder) ImmutableBitSet(org.apache.calcite.util.ImmutableBitSet) ArrayList(java.util.ArrayList) SqlKind(org.apache.calcite.sql.SqlKind) AggregateCall(org.apache.calcite.rel.core.AggregateCall) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) RexInputRef(org.apache.calcite.rex.RexInputRef) ArrayList(java.util.ArrayList) ImmutableIntList(org.apache.calcite.util.ImmutableIntList) List(java.util.List) Aggregate(org.apache.calcite.rel.core.Aggregate) LogicalAggregate(org.apache.calcite.rel.logical.LogicalAggregate) Pair(org.apache.calcite.util.Pair)
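
The guard at the top of onMatch can be reproduced with a query that mixes an accurate and an approximate distinct aggregate. A sketch, assuming a Flink release where APPROX_COUNT_DISTINCT is available (recent releases expose it in batch mode, hence inBatchMode() here); the clicks table is a placeholder:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.TableException;

public class MixedDistinctSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
        tEnv.executeSql(
                "CREATE TABLE clicks (user_id BIGINT, url STRING) WITH ('connector' = 'datagen')");
        try {
            // One accurate and one approximate distinct aggregate in the same
            // statement triggers the TableException thrown in onMatch above.
            tEnv.explainSql(
                    "SELECT COUNT(DISTINCT user_id), APPROX_COUNT_DISTINCT(user_id) FROM clicks");
        } catch (TableException e) {
            System.err.println(e.getMessage());
        }
    }
}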

Example 18 with TableException

use of org.apache.flink.table.api.TableException in project flink by apache.

the class TemporalTableSourceSpec method getTemporalTable.

@JsonIgnore
public RelOptTable getTemporalTable(FlinkContext flinkContext) {
    if (null != temporalTable) {
        return temporalTable;
    }
    if (null != tableSourceSpec && null != outputType) {
        LookupTableSource lookupTableSource = tableSourceSpec.getLookupTableSource(flinkContext);
        SourceAbilitySpec[] sourceAbilitySpecs = null;
        if (null != tableSourceSpec.getSourceAbilities()) {
            sourceAbilitySpecs = tableSourceSpec.getSourceAbilities().toArray(new SourceAbilitySpec[0]);
        }
        return new TableSourceTable(null, outputType, FlinkStatistic.UNKNOWN(), lookupTableSource, true, tableSourceSpec.getContextResolvedTable(), flinkContext, sourceAbilitySpecs);
    }
    throw new TableException("Can not obtain temporalTable correctly!");
}
Also used : TableException(org.apache.flink.table.api.TableException) SourceAbilitySpec(org.apache.flink.table.planner.plan.abilities.source.SourceAbilitySpec) LookupTableSource(org.apache.flink.table.connector.source.LookupTableSource) TableSourceTable(org.apache.flink.table.planner.plan.schema.TableSourceTable) JsonIgnore(org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonIgnore)
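
The method follows a fail-fast lazy-resolution pattern: return the cached table, rebuild it when the serialized spec and output type are both present, and otherwise throw rather than hand the planner a null. A minimal, self-contained sketch of the same shape (all names are hypothetical stand-ins, with String standing in for the planner types):

import org.apache.flink.table.api.TableException;

final class LazyResolver {
    // Correspond to temporalTable / tableSourceSpec / outputType above.
    private String cached;
    private final String spec;
    private final String type;

    LazyResolver(String cached, String spec, String type) {
        this.cached = cached;
        this.spec = spec;
        this.type = type;
    }

    String resolveOrFail() {
        if (cached != null) {
            return cached; // already resolved
        }
        if (spec != null && type != null) {
            // Stand-in for rebuilding the TableSourceTable from its spec.
            cached = spec + ":" + type;
            return cached;
        }
        // Fail fast instead of returning null.
        throw new TableException("Can not obtain temporalTable correctly!");
    }
}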

Example 19 with TableException

use of org.apache.flink.table.api.TableException in project flink by apache.

the class StreamExecPythonGroupWindowAggregate method generateWindowAssignerAndTrigger.

private Tuple2<WindowAssigner<?>, Trigger<?>> generateWindowAssignerAndTrigger() {
    WindowAssigner<?> windowAssigner;
    Trigger<?> trigger;
    if (window instanceof TumblingGroupWindow) {
        TumblingGroupWindow tumblingWindow = (TumblingGroupWindow) window;
        FieldReferenceExpression timeField = tumblingWindow.timeField();
        ValueLiteralExpression size = tumblingWindow.size();
        if (isProctimeAttribute(timeField) && hasTimeIntervalType(size)) {
            windowAssigner = TumblingWindowAssigner.of(toDuration(size)).withProcessingTime();
            trigger = ProcessingTimeTriggers.afterEndOfWindow();
        } else if (isRowtimeAttribute(timeField) && hasTimeIntervalType(size)) {
            windowAssigner = TumblingWindowAssigner.of(toDuration(size)).withEventTime();
            trigger = EventTimeTriggers.afterEndOfWindow();
        } else if (isProctimeAttribute(timeField) && hasRowIntervalType(size)) {
            windowAssigner = CountTumblingWindowAssigner.of(toLong(size));
            trigger = ElementTriggers.count(toLong(size));
        } else {
            // remaining case: event-time tumbling window over a row-count interval
            throw new UnsupportedOperationException("Event-time grouping windows on row intervals are currently not supported.");
        }
    } else if (window instanceof SlidingGroupWindow) {
        SlidingGroupWindow slidingWindow = (SlidingGroupWindow) window;
        FieldReferenceExpression timeField = slidingWindow.timeField();
        ValueLiteralExpression size = slidingWindow.size();
        ValueLiteralExpression slide = slidingWindow.slide();
        if (isProctimeAttribute(timeField) && hasTimeIntervalType(size)) {
            windowAssigner = SlidingWindowAssigner.of(toDuration(size), toDuration(slide)).withProcessingTime();
            trigger = ProcessingTimeTriggers.afterEndOfWindow();
        } else if (isRowtimeAttribute(timeField) && hasTimeIntervalType(size)) {
            windowAssigner = SlidingWindowAssigner.of(toDuration(size), toDuration(slide));
            trigger = EventTimeTriggers.afterEndOfWindow();
        } else if (isProctimeAttribute(timeField) && hasRowIntervalType(size)) {
            windowAssigner = CountSlidingWindowAssigner.of(toLong(size), toLong(slide));
            trigger = ElementTriggers.count(toLong(size));
        } else {
            // remaining case: event-time sliding window over a row-count interval
            throw new UnsupportedOperationException("Event-time grouping windows on row intervals are currently not supported.");
        }
    } else if (window instanceof SessionGroupWindow) {
        SessionGroupWindow sessionWindow = (SessionGroupWindow) window;
        FieldReferenceExpression timeField = sessionWindow.timeField();
        ValueLiteralExpression gap = sessionWindow.gap();
        if (isProctimeAttribute(timeField)) {
            windowAssigner = SessionWindowAssigner.withGap(toDuration(gap));
            trigger = ProcessingTimeTriggers.afterEndOfWindow();
        } else if (isRowtimeAttribute(timeField)) {
            windowAssigner = SessionWindowAssigner.withGap(toDuration(gap));
            trigger = EventTimeTriggers.afterEndOfWindow();
        } else {
            throw new UnsupportedOperationException("This should not happen.");
        }
    } else {
        throw new TableException("Unsupported window: " + window.toString());
    }
    return Tuple2.of(windowAssigner, trigger);
}
Also used : TableException(org.apache.flink.table.api.TableException) TumblingGroupWindow(org.apache.flink.table.planner.plan.logical.TumblingGroupWindow) ValueLiteralExpression(org.apache.flink.table.expressions.ValueLiteralExpression) FieldReferenceExpression(org.apache.flink.table.expressions.FieldReferenceExpression) SlidingGroupWindow(org.apache.flink.table.planner.plan.logical.SlidingGroupWindow) SessionGroupWindow(org.apache.flink.table.planner.plan.logical.SessionGroupWindow)
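
For reference, the branches above correspond to the usual group-window declarations; the non-Python group-window aggregate node applies broadly the same time-attribute/interval branching. A sketch of the two supported tumbling cases in SQL (the clicks table is a placeholder; ts is an event-time attribute, proc a processing-time attribute):

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class GroupWindowSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        tEnv.executeSql(
                "CREATE TABLE clicks (user_id BIGINT, ts TIMESTAMP(3), "
                        + "proc AS PROCTIME(), WATERMARK FOR ts AS ts) "
                        + "WITH ('connector' = 'datagen')");
        // Rowtime + time interval: TumblingWindowAssigner...withEventTime()
        // plus EventTimeTriggers.afterEndOfWindow() in the method above.
        tEnv.explainSql(
                "SELECT COUNT(*) FROM clicks GROUP BY TUMBLE(ts, INTERVAL '1' MINUTE)");
        // Proctime + time interval: TumblingWindowAssigner...withProcessingTime()
        // plus ProcessingTimeTriggers.afterEndOfWindow().
        tEnv.explainSql(
                "SELECT COUNT(*) FROM clicks GROUP BY TUMBLE(proc, INTERVAL '1' MINUTE)");
    }
}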

Example 20 with TableException

use of org.apache.flink.table.api.TableException in project flink by apache.

the class StreamExecSort method translateToPlanInternal.

@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    if (!config.get(InternalConfigOptions.TABLE_EXEC_NON_TEMPORAL_SORT_ENABLED)) {
        throw new TableException("Sort on a non-time-attribute field is not supported.");
    }
    ExecEdge inputEdge = getInputEdges().get(0);
    RowType inputType = (RowType) inputEdge.getOutputType();
    // sort code gen
    GeneratedRecordComparator rowComparator = ComparatorCodeGenerator.gen(config.getTableConfig(), "StreamExecSortComparator", inputType, sortSpec);
    StreamSortOperator sortOperator = new StreamSortOperator(InternalTypeInfo.of(inputType), rowComparator);
    Transformation<RowData> inputTransform = (Transformation<RowData>) inputEdge.translateToPlan(planner);
    return ExecNodeUtil.createOneInputTransformation(inputTransform, createTransformationMeta(SORT_TRANSFORMATION, config), sortOperator, InternalTypeInfo.of(inputType), inputTransform.getParallelism());
}
Also used : TableException(org.apache.flink.table.api.TableException) RowData(org.apache.flink.table.data.RowData) Transformation(org.apache.flink.api.dag.Transformation) ExecEdge(org.apache.flink.table.planner.plan.nodes.exec.ExecEdge) RowType(org.apache.flink.table.types.logical.RowType) StreamSortOperator(org.apache.flink.table.runtime.operators.sort.StreamSortOperator) GeneratedRecordComparator(org.apache.flink.table.runtime.generated.GeneratedRecordComparator)
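
From user code, this guard is what rejects a plain ORDER BY on a regular column of a streaming table. A sketch (the clicks table is a placeholder; the internal option checked above is not meant to be set by applications):

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.TableException;

public class NonTemporalSortSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        tEnv.executeSql(
                "CREATE TABLE clicks (user_id BIGINT, url STRING) WITH ('connector' = 'datagen')");
        try {
            // Sorting a streaming table on a non-time attribute hits the
            // TABLE_EXEC_NON_TEMPORAL_SORT_ENABLED check above.
            tEnv.explainSql("SELECT * FROM clicks ORDER BY user_id");
        } catch (TableException e) {
            System.err.println(e.getMessage());
        }
    }
}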

Aggregations

TableException (org.apache.flink.table.api.TableException) 163
RowData (org.apache.flink.table.data.RowData) 35
RowType (org.apache.flink.table.types.logical.RowType) 35
Transformation (org.apache.flink.api.dag.Transformation) 28
ArrayList (java.util.ArrayList) 27
ExecEdge (org.apache.flink.table.planner.plan.nodes.exec.ExecEdge) 24
LogicalType (org.apache.flink.table.types.logical.LogicalType) 24
List (java.util.List) 22
DataType (org.apache.flink.table.types.DataType) 19
OneInputTransformation (org.apache.flink.streaming.api.transformations.OneInputTransformation) 18
ValidationException (org.apache.flink.table.api.ValidationException) 17
IOException (java.io.IOException) 13
AggregateCall (org.apache.calcite.rel.core.AggregateCall) 13
ValueLiteralExpression (org.apache.flink.table.expressions.ValueLiteralExpression) 13
RowDataKeySelector (org.apache.flink.table.runtime.keyselector.RowDataKeySelector) 13
Optional (java.util.Optional) 11
Configuration (org.apache.flink.configuration.Configuration) 11
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) 11
Constructor (java.lang.reflect.Constructor) 10
Arrays (java.util.Arrays) 9