Usage of org.apache.flink.table.api.TableException in the Apache Flink project.
Example: the deriveSinkParallelism method of the CommonExecSink class.
/**
 * Derives the parallelism to apply to the sink operator.
 *
 * <p>If the runtime provider implements {@link ParallelismProvider} and an explicit
 * parallelism is configured, that value is validated and returned; otherwise the
 * parallelism of the input transformation is inherited.
 *
 * @param inputTransform the upstream transformation feeding the sink
 * @param runtimeProvider the sink runtime provider, possibly a {@link ParallelismProvider}
 * @return the resolved sink parallelism
 */
private int deriveSinkParallelism(Transformation<RowData> inputTransform, SinkRuntimeProvider runtimeProvider) {
    final int fallbackParallelism = inputTransform.getParallelism();
    if (!(runtimeProvider instanceof ParallelismProvider)) {
        // The provider cannot configure parallelism; inherit it from the input.
        return fallbackParallelism;
    }
    return ((ParallelismProvider) runtimeProvider)
            .getParallelism()
            .map(configured -> {
                // Reject non-positive values before they reach the runtime.
                if (configured <= 0) {
                    throw new TableException(String.format("Invalid configured parallelism %s for table '%s'.", configured, tableSinkSpec.getContextResolvedTable().getIdentifier().asSummaryString()));
                }
                return configured;
            })
            .orElse(fallbackParallelism);
}
Usage of org.apache.flink.table.api.TableException in the Apache Flink project.
Example: the convertToExecNodeGraph method of the JsonPlanGraph class.
/**
 * Rebuilds the {@link ExecNodeGraph} described by this JSON plan graph.
 *
 * <p>Validates that node ids are unique and that every edge references known nodes,
 * wires input edges into each node, and collects the nodes without outgoing edges
 * as the graph roots.
 *
 * @return the reconstructed graph
 * @throws TableException if a node id is duplicated or an edge endpoint is unknown
 */
ExecNodeGraph convertToExecNodeGraph() {
    final Map<Integer, ExecNode<?>> nodesById = new HashMap<>();
    for (ExecNode<?> node : nodes) {
        int nodeId = node.getId();
        if (nodesById.containsKey(nodeId)) {
            throw new TableException(String.format("The id: %s is not unique for ExecNode: %s.\nplease check it.", nodeId, node.getDescription()));
        }
        nodesById.put(nodeId, node);
    }
    final Map<Integer, List<ExecEdge>> inputEdgesByTarget = new HashMap<>();
    final Map<Integer, List<ExecEdge>> outputEdgesBySource = new HashMap<>();
    for (JsonPlanEdge planEdge : edges) {
        final ExecNode<?> sourceNode = nodesById.get(planEdge.getSourceId());
        if (sourceNode == null) {
            throw new TableException(String.format("Source node id: %s is not found in nodes.", planEdge.getSourceId()));
        }
        final ExecNode<?> targetNode = nodesById.get(planEdge.getTargetId());
        if (targetNode == null) {
            throw new TableException(String.format("Target node id: %s is not found in nodes.", planEdge.getTargetId()));
        }
        final ExecEdge connection = ExecEdge.builder()
                .source(sourceNode)
                .target(targetNode)
                .shuffle(planEdge.getShuffle())
                .exchangeMode(planEdge.getExchangeMode())
                .build();
        inputEdgesByTarget.computeIfAbsent(targetNode.getId(), key -> new ArrayList<>()).add(connection);
        outputEdgesBySource.computeIfAbsent(sourceNode.getId(), key -> new ArrayList<>()).add(connection);
    }
    final List<ExecNode<?>> rootNodes = new ArrayList<>();
    for (Map.Entry<Integer, ExecNode<?>> entry : nodesById.entrySet()) {
        final ExecNode<?> node = entry.getValue();
        // Attach the resolved input edges (an empty list when the node has no inputs).
        node.setInputEdges(inputEdgesByTarget.getOrDefault(entry.getKey(), new ArrayList<>()));
        // A node that is never an edge's source has no consumers, i.e. it is a root.
        if (!outputEdgesBySource.containsKey(entry.getKey())) {
            rootNodes.add(node);
        }
    }
    return new ExecNodeGraph(flinkVersion, rootNodes);
}
Usage of org.apache.flink.table.api.TableException in the Apache Flink project.
Example: the fromExecNodeGraph method of the JsonPlanGraph class.
/**
 * Converts an {@link ExecNodeGraph} into its JSON plan representation.
 *
 * <p>Nodes are collected in topological order (inputs before consumers) via a
 * post-order visit from the roots; edges are translated to {@link JsonPlanEdge}s
 * as each node is emitted.
 *
 * @param execGraph the graph to convert
 * @return the equivalent JSON plan graph
 * @throws TableException if two distinct nodes share the same id
 */
static JsonPlanGraph fromExecNodeGraph(ExecNodeGraph execGraph) {
    final List<ExecNode<?>> collectedNodes = new ArrayList<>();
    final List<JsonPlanEdge> collectedEdges = new ArrayList<>();
    final Set<Integer> seenIds = new HashSet<>();
    // Identity-based membership check so equal-but-distinct nodes are not merged.
    final Set<ExecNode<?>> seenNodes = Sets.newIdentityHashSet();
    final ExecNodeVisitor collector = new ExecNodeVisitorImpl() {
        @Override
        public void visit(ExecNode<?> node) {
            if (seenNodes.contains(node)) {
                return;
            }
            // Post-order traversal: emit inputs before the node itself.
            super.visitInputs(node);
            final int nodeId = node.getId();
            if (seenIds.contains(nodeId)) {
                throw new TableException(String.format("The id: %s is not unique for ExecNode: %s.\nplease check it.", nodeId, node.getDescription()));
            }
            collectedNodes.add(node);
            seenIds.add(nodeId);
            seenNodes.add(node);
            node.getInputEdges().forEach(edge -> collectedEdges.add(JsonPlanEdge.fromExecEdge(edge)));
        }
    };
    execGraph.getRootNodes().forEach(collector::visit);
    // Sanity check: every collected node contributed exactly one distinct id.
    checkArgument(collectedNodes.size() == seenIds.size());
    return new JsonPlanGraph(execGraph.getFlinkVersion(), collectedNodes, collectedEdges);
}
Usage of org.apache.flink.table.api.TableException in the Apache Flink project.
Example: the deserializeSpecializedRaw method of the LogicalTypeJsonDeserializer class.
/**
 * Deserializes a RAW logical type from its JSON representation.
 *
 * <p>The serializer is resolved from one of two mutually exclusive fields: a special
 * marker serializer (currently only the null serializer) or an external data type
 * wrapped in an {@link ExternalSerializer}.
 *
 * @param logicalTypeNode the JSON node describing the RAW type
 * @param serdeContext context used to resolve the originating class
 * @return the reconstructed {@link RawType}
 * @throws TableException if the serializer marker is unknown or both fields are absent
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
private static LogicalType deserializeSpecializedRaw(JsonNode logicalTypeNode, SerdeContext serdeContext) {
    final Class<?> rawClass = loadClass(logicalTypeNode.get(FIELD_NAME_CLASS).asText(), serdeContext, "RAW type");
    if (logicalTypeNode.has(FIELD_NAME_SPECIAL_SERIALIZER)) {
        final String specialSerializer = logicalTypeNode.get(FIELD_NAME_SPECIAL_SERIALIZER).asText();
        if (!FIELD_VALUE_EXTERNAL_SERIALIZER_NULL.equals(specialSerializer)) {
            throw new TableException("Unknown external serializer: " + specialSerializer);
        }
        return new RawType(rawClass, NullSerializer.INSTANCE);
    }
    if (logicalTypeNode.has(FIELD_NAME_EXTERNAL_DATA_TYPE)) {
        final DataType externalDataType = DataTypeJsonDeserializer.deserialize(logicalTypeNode.get(FIELD_NAME_EXTERNAL_DATA_TYPE), serdeContext);
        return new RawType(rawClass, ExternalSerializer.of(externalDataType));
    }
    throw new TableException("Invalid RAW type.");
}
Usage of org.apache.flink.table.api.TableException in the Apache Flink project.
Example: the deserializeTimestamp method of the LogicalTypeJsonDeserializer class.
/**
 * Deserializes a timestamp-family logical type from its JSON representation.
 *
 * <p>Precision and timestamp kind are read from the node; the concrete type is
 * chosen by the given type root. All reconstructed types are nullable.
 *
 * @param typeRoot the timestamp type root to instantiate
 * @param logicalTypeNode the JSON node carrying precision and kind
 * @return the reconstructed timestamp type
 * @throws TableException if the type root is not a timestamp root
 */
private static LogicalType deserializeTimestamp(LogicalTypeRoot typeRoot, JsonNode logicalTypeNode) {
    final int precision = logicalTypeNode.get(FIELD_NAME_PRECISION).asInt();
    final TimestampKind timestampKind = TimestampKind.valueOf(logicalTypeNode.get(FIELD_NAME_TIMESTAMP_KIND).asText());
    if (typeRoot == LogicalTypeRoot.TIMESTAMP_WITHOUT_TIME_ZONE) {
        return new TimestampType(true, timestampKind, precision);
    }
    if (typeRoot == LogicalTypeRoot.TIMESTAMP_WITH_TIME_ZONE) {
        return new ZonedTimestampType(true, timestampKind, precision);
    }
    if (typeRoot == LogicalTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE) {
        return new LocalZonedTimestampType(true, timestampKind, precision);
    }
    throw new TableException("Timestamp type root expected.");
}
Aggregations