Search in sources :

Example 71 with TableException

use of org.apache.flink.table.api.TableException in project flink by apache.

In the class CommonExecSink, the method deriveSinkParallelism.

/**
 * Derives the parallelism to use for the sink operator.
 *
 * <p>If the given runtime provider implements {@link ParallelismProvider} and declares an
 * explicit parallelism, that value is validated (must be positive) and returned. In every
 * other case the parallelism of the input transformation is inherited.
 */
private int deriveSinkParallelism(Transformation<RowData> inputTransform, SinkRuntimeProvider runtimeProvider) {
    final int inputParallelism = inputTransform.getParallelism();
    if (!(runtimeProvider instanceof ParallelismProvider)) {
        // The provider cannot declare its own parallelism; fall back to the input's.
        return inputParallelism;
    }
    final Optional<Integer> configuredParallelism =
            ((ParallelismProvider) runtimeProvider).getParallelism();
    if (!configuredParallelism.isPresent()) {
        // Provider implements the interface but did not configure a value.
        return inputParallelism;
    }
    final int sinkParallelism = configuredParallelism.get();
    if (sinkParallelism <= 0) {
        // Reject non-positive values with the table identifier for easier debugging.
        throw new TableException(String.format("Invalid configured parallelism %s for table '%s'.", sinkParallelism, tableSinkSpec.getContextResolvedTable().getIdentifier().asSummaryString()));
    }
    return sinkParallelism;
}
Also used : TransformationMetadata(org.apache.flink.table.planner.plan.nodes.exec.utils.TransformationMetadata) Arrays(java.util.Arrays) InputProperty(org.apache.flink.table.planner.plan.nodes.exec.InputProperty) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) ExecNode(org.apache.flink.table.planner.plan.nodes.exec.ExecNode) CharType(org.apache.flink.table.types.logical.CharType) KeySelectorUtil(org.apache.flink.table.planner.plan.utils.KeySelectorUtil) InternalSerializers(org.apache.flink.table.runtime.typeutils.InternalSerializers) ConstraintEnforcer(org.apache.flink.table.runtime.operators.sink.ConstraintEnforcer) OutputFormat(org.apache.flink.api.common.io.OutputFormat) SinkUpsertMaterializer(org.apache.flink.table.runtime.operators.sink.SinkUpsertMaterializer) PartitionTransformation(org.apache.flink.streaming.api.transformations.PartitionTransformation) SinkFunction(org.apache.flink.streaming.api.functions.sink.SinkFunction) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) StateConfigUtil(org.apache.flink.table.runtime.util.StateConfigUtil) RowDataKeySelector(org.apache.flink.table.runtime.keyselector.RowDataKeySelector) Collectors(java.util.stream.Collectors) JsonProperty(org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty) SimpleOperatorFactory(org.apache.flink.streaming.api.operators.SimpleOperatorFactory) List(java.util.List) InternalTypeInfo(org.apache.flink.table.runtime.typeutils.InternalTypeInfo) LogicalType(org.apache.flink.table.types.logical.LogicalType) DataStreamSinkProvider(org.apache.flink.table.connector.sink.DataStreamSinkProvider) LegacySinkTransformation(org.apache.flink.streaming.api.transformations.LegacySinkTransformation) Optional(java.util.Optional) ExecNodeBase(org.apache.flink.table.planner.plan.nodes.exec.ExecNodeBase) KeyGroupRangeAssignment(org.apache.flink.runtime.state.KeyGroupRangeAssignment) 
StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) SinkRuntimeProvider(org.apache.flink.table.connector.sink.DynamicTableSink.SinkRuntimeProvider) IntStream(java.util.stream.IntStream) BinaryType(org.apache.flink.table.types.logical.BinaryType) ParallelismProvider(org.apache.flink.table.connector.ParallelismProvider) ChangelogMode(org.apache.flink.table.connector.ChangelogMode) MultipleTransformationTranslator(org.apache.flink.table.planner.plan.nodes.exec.MultipleTransformationTranslator) StreamRecordTimestampInserter(org.apache.flink.table.runtime.operators.sink.StreamRecordTimestampInserter) TransformationSinkProvider(org.apache.flink.table.planner.connectors.TransformationSinkProvider) RowType(org.apache.flink.table.types.logical.RowType) ArrayList(java.util.ArrayList) SinkV2Provider(org.apache.flink.table.connector.sink.SinkV2Provider) OutputFormatSinkFunction(org.apache.flink.streaming.api.functions.sink.OutputFormatSinkFunction) DynamicTableSinkSpec(org.apache.flink.table.planner.plan.nodes.exec.spec.DynamicTableSinkSpec) ExecNodeUtil(org.apache.flink.table.planner.plan.nodes.exec.utils.ExecNodeUtil) ReadableConfig(org.apache.flink.configuration.ReadableConfig) SinkFunctionProvider(org.apache.flink.table.connector.sink.SinkFunctionProvider) DataStreamSink(org.apache.flink.streaming.api.datastream.DataStreamSink) ExecNodeContext(org.apache.flink.table.planner.plan.nodes.exec.ExecNodeContext) RowData(org.apache.flink.table.data.RowData) ProviderContext(org.apache.flink.table.connector.ProviderContext) SinkOperator(org.apache.flink.table.runtime.operators.sink.SinkOperator) TableException(org.apache.flink.table.api.TableException) SinkProvider(org.apache.flink.table.connector.sink.SinkProvider) OneInputTransformation(org.apache.flink.streaming.api.transformations.OneInputTransformation) DataStream(org.apache.flink.streaming.api.datastream.DataStream) 
OutputFormatProvider(org.apache.flink.table.connector.sink.OutputFormatProvider) KeyGroupStreamPartitioner(org.apache.flink.streaming.runtime.partitioner.KeyGroupStreamPartitioner) EqualiserCodeGenerator(org.apache.flink.table.planner.codegen.EqualiserCodeGenerator) SinkRuntimeProviderContext(org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) RowKind(org.apache.flink.types.RowKind) StreamExecNode(org.apache.flink.table.planner.plan.nodes.exec.stream.StreamExecNode) GeneratedRecordEqualiser(org.apache.flink.table.runtime.generated.GeneratedRecordEqualiser) Transformation(org.apache.flink.api.dag.Transformation) InputTypeConfigurable(org.apache.flink.api.java.typeutils.InputTypeConfigurable) ExecutionConfigOptions(org.apache.flink.table.api.config.ExecutionConfigOptions) LogicalTypeRoot(org.apache.flink.table.types.logical.LogicalTypeRoot) LogicalTypeChecks(org.apache.flink.table.types.logical.utils.LogicalTypeChecks) TableException(org.apache.flink.table.api.TableException) ParallelismProvider(org.apache.flink.table.connector.ParallelismProvider)

Example 72 with TableException

use of org.apache.flink.table.api.TableException in project flink by apache.

In the class JsonPlanGraph, the method convertToExecNodeGraph.

ExecNodeGraph convertToExecNodeGraph() {
    // Index every node by its id, rejecting duplicate ids up front.
    final Map<Integer, ExecNode<?>> nodesById = new HashMap<>();
    for (ExecNode<?> node : nodes) {
        final int nodeId = node.getId();
        if (nodesById.containsKey(nodeId)) {
            throw new TableException(String.format("The id: %s is not unique for ExecNode: %s.\nplease check it.", nodeId, node.getDescription()));
        }
        nodesById.put(nodeId, node);
    }
    // Rebuild the edge lists, grouped by target (inputs) and by source (outputs).
    final Map<Integer, List<ExecEdge>> inputEdgesById = new HashMap<>();
    final Map<Integer, List<ExecEdge>> outputEdgesById = new HashMap<>();
    for (JsonPlanEdge jsonEdge : edges) {
        final ExecNode<?> source = nodesById.get(jsonEdge.getSourceId());
        if (source == null) {
            throw new TableException(String.format("Source node id: %s is not found in nodes.", jsonEdge.getSourceId()));
        }
        final ExecNode<?> target = nodesById.get(jsonEdge.getTargetId());
        if (target == null) {
            throw new TableException(String.format("Target node id: %s is not found in nodes.", jsonEdge.getTargetId()));
        }
        final ExecEdge execEdge =
                ExecEdge.builder()
                        .source(source)
                        .target(target)
                        .shuffle(jsonEdge.getShuffle())
                        .exchangeMode(jsonEdge.getExchangeMode())
                        .build();
        inputEdgesById.computeIfAbsent(target.getId(), k -> new ArrayList<>()).add(execEdge);
        outputEdgesById.computeIfAbsent(source.getId(), k -> new ArrayList<>()).add(execEdge);
    }
    // Wire input edges into each node; nodes without outgoing edges are graph roots.
    final List<ExecNode<?>> rootNodes = new ArrayList<>();
    nodesById.forEach((nodeId, node) -> {
        node.setInputEdges(inputEdgesById.getOrDefault(nodeId, new ArrayList<>()));
        if (!outputEdgesById.containsKey(nodeId)) {
            rootNodes.add(node);
        }
    });
    return new ExecNodeGraph(flinkVersion, rootNodes);
}
Also used : JsonCreator(org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator) Sets(org.apache.flink.shaded.guava30.com.google.common.collect.Sets) ExecNodeVisitorImpl(org.apache.flink.table.planner.plan.nodes.exec.visitor.ExecNodeVisitorImpl) TableException(org.apache.flink.table.api.TableException) Set(java.util.Set) HashMap(java.util.HashMap) JsonProperty(org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty) ExecNode(org.apache.flink.table.planner.plan.nodes.exec.ExecNode) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) List(java.util.List) ExecEdge(org.apache.flink.table.planner.plan.nodes.exec.ExecEdge) Preconditions.checkArgument(org.apache.flink.util.Preconditions.checkArgument) ExecNodeVisitor(org.apache.flink.table.planner.plan.nodes.exec.visitor.ExecNodeVisitor) Map(java.util.Map) Internal(org.apache.flink.annotation.Internal) FlinkVersion(org.apache.flink.FlinkVersion) ExecNodeGraph(org.apache.flink.table.planner.plan.nodes.exec.ExecNodeGraph) ExecNodeGraph(org.apache.flink.table.planner.plan.nodes.exec.ExecNodeGraph) TableException(org.apache.flink.table.api.TableException) ExecEdge(org.apache.flink.table.planner.plan.nodes.exec.ExecEdge) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ArrayList(java.util.ArrayList) List(java.util.List) ExecNode(org.apache.flink.table.planner.plan.nodes.exec.ExecNode) HashMap(java.util.HashMap) Map(java.util.Map)

Example 73 with TableException

use of org.apache.flink.table.api.TableException in project flink by apache.

In the class JsonPlanGraph, the method fromExecNodeGraph.

static JsonPlanGraph fromExecNodeGraph(ExecNodeGraph execGraph) {
    final List<ExecNode<?>> allNodes = new ArrayList<>();
    final List<JsonPlanEdge> allEdges = new ArrayList<>();
    // Tracks ids we have emitted, to detect duplicates.
    final Set<Integer> seenIds = new HashSet<>();
    // Identity-based set so structurally-equal but distinct nodes are not skipped.
    final Set<ExecNode<?>> visited = Sets.newIdentityHashSet();
    // Post-order traversal: inputs are visited first, yielding a topological ordering.
    final ExecNodeVisitor visitor = new ExecNodeVisitorImpl() {

        @Override
        public void visit(ExecNode<?> node) {
            if (!visited.contains(node)) {
                super.visitInputs(node);
                final int nodeId = node.getId();
                if (seenIds.contains(nodeId)) {
                    throw new TableException(String.format("The id: %s is not unique for ExecNode: %s.\nplease check it.", nodeId, node.getDescription()));
                }
                allNodes.add(node);
                seenIds.add(nodeId);
                visited.add(node);
                node.getInputEdges().forEach(edge -> allEdges.add(JsonPlanEdge.fromExecEdge(edge)));
            }
        }
    };
    execGraph.getRootNodes().forEach(visitor::visit);
    // Sanity check: every collected node carried a distinct id.
    checkArgument(allNodes.size() == seenIds.size());
    return new JsonPlanGraph(execGraph.getFlinkVersion(), allNodes, allEdges);
}
Also used : TableException(org.apache.flink.table.api.TableException) ExecNodeVisitorImpl(org.apache.flink.table.planner.plan.nodes.exec.visitor.ExecNodeVisitorImpl) ExecEdge(org.apache.flink.table.planner.plan.nodes.exec.ExecEdge) ArrayList(java.util.ArrayList) ExecNodeVisitor(org.apache.flink.table.planner.plan.nodes.exec.visitor.ExecNodeVisitor) ExecNode(org.apache.flink.table.planner.plan.nodes.exec.ExecNode) HashSet(java.util.HashSet)

Example 74 with TableException

use of org.apache.flink.table.api.TableException in project flink by apache.

In the class LogicalTypeJsonDeserializer, the method deserializeSpecializedRaw.

@SuppressWarnings({ "unchecked", "rawtypes" })
private static LogicalType deserializeSpecializedRaw(JsonNode logicalTypeNode, SerdeContext serdeContext) {
    // Resolve the Java class the RAW type wraps.
    final Class<?> rawClass = loadClass(logicalTypeNode.get(FIELD_NAME_CLASS).asText(), serdeContext, "RAW type");
    // Case 1: a named "special" serializer (currently only the null serializer is known).
    if (logicalTypeNode.has(FIELD_NAME_SPECIAL_SERIALIZER)) {
        final String specialSerializer = logicalTypeNode.get(FIELD_NAME_SPECIAL_SERIALIZER).asText();
        if (!FIELD_VALUE_EXTERNAL_SERIALIZER_NULL.equals(specialSerializer)) {
            throw new TableException("Unknown external serializer: " + specialSerializer);
        }
        return new RawType(rawClass, NullSerializer.INSTANCE);
    }
    // Case 2: an external data type whose serializer is derived from the data type.
    if (logicalTypeNode.has(FIELD_NAME_EXTERNAL_DATA_TYPE)) {
        final DataType dataType = DataTypeJsonDeserializer.deserialize(logicalTypeNode.get(FIELD_NAME_EXTERNAL_DATA_TYPE), serdeContext);
        return new RawType(rawClass, ExternalSerializer.of(dataType));
    }
    // Neither serializer field is present: the RAW node is malformed.
    throw new TableException("Invalid RAW type.");
}
Also used : TableException(org.apache.flink.table.api.TableException) DataType(org.apache.flink.table.types.DataType) RawType(org.apache.flink.table.types.logical.RawType)

Example 75 with TableException

use of org.apache.flink.table.api.TableException in project flink by apache.

In the class LogicalTypeJsonDeserializer, the method deserializeTimestamp.

// Reconstructs one of the three timestamp logical types from its JSON form.
// The precision and timestamp kind are read from the node; the type-root argument
// selects which concrete timestamp class to instantiate.
private static LogicalType deserializeTimestamp(LogicalTypeRoot typeRoot, JsonNode logicalTypeNode) {
    final int precision = logicalTypeNode.get(FIELD_NAME_PRECISION).asInt();
    final TimestampKind kind = TimestampKind.valueOf(logicalTypeNode.get(FIELD_NAME_TIMESTAMP_KIND).asText());
    if (typeRoot == LogicalTypeRoot.TIMESTAMP_WITHOUT_TIME_ZONE) {
        return new TimestampType(true, kind, precision);
    } else if (typeRoot == LogicalTypeRoot.TIMESTAMP_WITH_TIME_ZONE) {
        return new ZonedTimestampType(true, kind, precision);
    } else if (typeRoot == LogicalTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE) {
        return new LocalZonedTimestampType(true, kind, precision);
    }
    // Any other root is a caller error: this method only handles timestamp roots.
    throw new TableException("Timestamp type root expected.");
}
Also used : TableException(org.apache.flink.table.api.TableException) LocalZonedTimestampType(org.apache.flink.table.types.logical.LocalZonedTimestampType) ZonedTimestampType(org.apache.flink.table.types.logical.ZonedTimestampType) LocalZonedTimestampType(org.apache.flink.table.types.logical.LocalZonedTimestampType) LocalZonedTimestampType(org.apache.flink.table.types.logical.LocalZonedTimestampType) TimestampType(org.apache.flink.table.types.logical.TimestampType) ZonedTimestampType(org.apache.flink.table.types.logical.ZonedTimestampType) TimestampKind(org.apache.flink.table.types.logical.TimestampKind)

Aggregations

TableException (org.apache.flink.table.api.TableException)163 RowData (org.apache.flink.table.data.RowData)35 RowType (org.apache.flink.table.types.logical.RowType)35 Transformation (org.apache.flink.api.dag.Transformation)28 ArrayList (java.util.ArrayList)27 ExecEdge (org.apache.flink.table.planner.plan.nodes.exec.ExecEdge)24 LogicalType (org.apache.flink.table.types.logical.LogicalType)24 List (java.util.List)22 DataType (org.apache.flink.table.types.DataType)19 OneInputTransformation (org.apache.flink.streaming.api.transformations.OneInputTransformation)18 ValidationException (org.apache.flink.table.api.ValidationException)17 IOException (java.io.IOException)13 AggregateCall (org.apache.calcite.rel.core.AggregateCall)13 ValueLiteralExpression (org.apache.flink.table.expressions.ValueLiteralExpression)13 RowDataKeySelector (org.apache.flink.table.runtime.keyselector.RowDataKeySelector)13 Optional (java.util.Optional)11 Configuration (org.apache.flink.configuration.Configuration)11 StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment)11 Constructor (java.lang.reflect.Constructor)10 Arrays (java.util.Arrays)9