Use of org.apache.flink.table.planner.plan.nodes.exec.InputProperty in project flink by apache.
The class StreamExecExchange, method translateToPlanInternal:
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(
        PlannerBase planner, ExecNodeConfig config) {
    final Transformation<RowData> inputTransform =
            (Transformation<RowData>) getInputEdges().get(0).translateToPlan(planner);
    final StreamPartitioner<RowData> partitioner;
    final int parallelism;
    final InputProperty inputProperty = getInputProperties().get(0);
    final InputProperty.DistributionType distributionType =
            inputProperty.getRequiredDistribution().getType();
    switch (distributionType) {
        case SINGLETON:
            partitioner = new GlobalPartitioner<>();
            parallelism = 1;
            break;
        case HASH:
            // TODO Eliminate duplicate keys
            int[] keys =
                    ((HashDistribution) inputProperty.getRequiredDistribution()).getKeys();
            InternalTypeInfo<RowData> inputType =
                    (InternalTypeInfo<RowData>) inputTransform.getOutputType();
            RowDataKeySelector keySelector =
                    KeySelectorUtil.getRowDataSelector(keys, inputType);
            partitioner =
                    new KeyGroupStreamPartitioner<>(
                            keySelector, DEFAULT_LOWER_BOUND_MAX_PARALLELISM);
            parallelism = ExecutionConfig.PARALLELISM_DEFAULT;
            break;
        default:
            throw new TableException(
                    String.format("%s is not supported now!", distributionType));
    }
    final Transformation<RowData> transformation =
            new PartitionTransformation<>(inputTransform, partitioner);
    createTransformationMeta(EXCHANGE_TRANSFORMATION, config).fill(transformation);
    transformation.setParallelism(parallelism);
    transformation.setOutputType(InternalTypeInfo.of(getOutputType()));
    return transformation;
}
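For context, the HASH branch above is driven by the required distribution carried in the input's InputProperty. A minimal sketch, assuming a hypothetical key column at index 0 and the same imports as the snippet above, of building such a property with the InputProperty.builder() API that appears later on this page:

// Hypothetical: hash-distribute on field 0; PIPELINED dam behavior and
// priority 0 are assumed here to match the builder defaults.
InputProperty hashProperty =
        InputProperty.builder()
                .requiredDistribution(InputProperty.hashDistribution(new int[] {0}))
                .damBehavior(InputProperty.DamBehavior.PIPELINED)
                .priority(0)
                .build();
// hashProperty.getRequiredDistribution().getType() is DistributionType.HASH,
// so translateToPlanInternal above would pick the KeyGroupStreamPartitioner.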
Use of org.apache.flink.table.planner.plan.nodes.exec.InputProperty in project flink by apache.
The class BatchExecMultipleInput, method translateToPlanInternal:
@Override
protected Transformation<RowData> translateToPlanInternal(
        PlannerBase planner, ExecNodeConfig config) {
    final List<Transformation<?>> inputTransforms = new ArrayList<>();
    for (ExecEdge inputEdge : getInputEdges()) {
        inputTransforms.add(inputEdge.translateToPlan(planner));
    }
    final Transformation<?> outputTransform = rootNode.translateToPlan(planner);
    final int[] readOrders =
            getInputProperties().stream()
                    .map(InputProperty::getPriority)
                    .mapToInt(i -> i)
                    .toArray();
    final TableOperatorWrapperGenerator generator =
            new TableOperatorWrapperGenerator(inputTransforms, outputTransform, readOrders);
    generator.generate();
    final List<Pair<Transformation<?>, InputSpec>> inputTransformAndInputSpecPairs =
            generator.getInputTransformAndInputSpecPairs();
    final MultipleInputTransformation<RowData> multipleInputTransform =
            new MultipleInputTransformation<>(
                    createTransformationName(config),
                    new BatchMultipleInputStreamOperatorFactory(
                            inputTransformAndInputSpecPairs.stream()
                                    .map(Pair::getValue)
                                    .collect(Collectors.toList()),
                            generator.getHeadWrappers(),
                            generator.getTailWrapper()),
                    InternalTypeInfo.of(getOutputType()),
                    generator.getParallelism());
    multipleInputTransform.setDescription(createTransformationDescription(config));
    inputTransformAndInputSpecPairs.forEach(
            input -> multipleInputTransform.addInput(input.getKey()));
    if (generator.getMaxParallelism() > 0) {
        multipleInputTransform.setMaxParallelism(generator.getMaxParallelism());
    }
    // set resources
    multipleInputTransform.setResources(
            generator.getMinResources(), generator.getPreferredResources());
    final int memoryWeight = generator.getManagedMemoryWeight();
    final long memoryBytes = (long) memoryWeight << 20;
    ExecNodeUtil.setManagedMemoryWeight(multipleInputTransform, memoryBytes);
    // set chaining strategy for source chaining
    multipleInputTransform.setChainingStrategy(ChainingStrategy.HEAD_WITH_SOURCES);
    return multipleInputTransform;
}
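The readOrders array above comes straight from each input's InputProperty priority. A small sketch, assuming two hypothetical input properties and the usual smaller-value-reads-first contract, of how the priorities flow into the read-order array:

// Hypothetical pair of input properties for a two-input node.
List<InputProperty> properties =
        Arrays.asList(
                InputProperty.builder().priority(1).build(),  // consumed later
                InputProperty.builder().priority(0).build()); // consumed first
int[] readOrders =
        properties.stream()
                .map(InputProperty::getPriority)
                .mapToInt(Integer::intValue)
                .toArray(); // => {1, 0}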
Use of org.apache.flink.table.planner.plan.nodes.exec.InputProperty in project flink by apache.
The class MultipleInputNodeCreationProcessor, method createStreamMultipleInputNode:
private StreamExecMultipleInput createStreamMultipleInputNode(
        MultipleInputGroup group, List<Tuple3<ExecNode<?>, InputProperty, ExecEdge>> inputs) {
    ExecNode<?> rootNode = group.root.execNode;
    List<ExecNode<?>> inputNodes = new ArrayList<>();
    for (Tuple3<ExecNode<?>, InputProperty, ExecEdge> tuple3 : inputs) {
        inputNodes.add(tuple3.f0);
    }
    String description =
            ExecNodeUtil.getMultipleInputDescription(rootNode, inputNodes, new ArrayList<>());
    StreamExecMultipleInput multipleInput =
            new StreamExecMultipleInput(
                    inputNodes.stream()
                            .map(i -> InputProperty.DEFAULT)
                            .collect(Collectors.toList()),
                    rootNode,
                    description);
    List<ExecEdge> inputEdges = new ArrayList<>(inputNodes.size());
    for (ExecNode<?> inputNode : inputNodes) {
        inputEdges.add(ExecEdge.builder().source(inputNode).target(multipleInput).build());
    }
    multipleInput.setInputEdges(inputEdges);
    return multipleInput;
}
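InputProperty.DEFAULT, mapped over every input above, places no constraint on how the input is read. A minimal sketch of an equivalent explicit construction, assuming the builder's defaults (ANY distribution, PIPELINED dam behavior, priority 0):

// Hypothetical equivalent of InputProperty.DEFAULT under the assumed defaults.
InputProperty defaultLike =
        InputProperty.builder()
                .requiredDistribution(InputProperty.ANY_DISTRIBUTION)
                .damBehavior(InputProperty.DamBehavior.PIPELINED)
                .priority(0)
                .build();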
Use of org.apache.flink.table.planner.plan.nodes.exec.InputProperty in project flink by apache.
The class MultipleInputNodeCreationProcessor, method createMultipleInputNode:
private ExecNode<?> createMultipleInputNode(
        MultipleInputGroup group, Map<ExecNodeWrapper, ExecNode<?>> visitedMap) {
    // calculate the inputs of the multiple input node
    List<Tuple3<ExecNode<?>, InputProperty, ExecEdge>> inputs = new ArrayList<>();
    for (ExecNodeWrapper member : group.members) {
        for (int i = 0; i < member.inputs.size(); i++) {
            ExecNodeWrapper memberInput = member.inputs.get(i);
            if (group.members.contains(memberInput)) {
                continue;
            }
            Preconditions.checkState(
                    visitedMap.containsKey(memberInput),
                    "Input of a multiple input member is not visited. This is a bug.");
            ExecNode<?> inputNode = visitedMap.get(memberInput);
            InputProperty inputProperty = member.execNode.getInputProperties().get(i);
            ExecEdge edge = member.execNode.getInputEdges().get(i);
            inputs.add(Tuple3.of(inputNode, inputProperty, edge));
        }
    }
    if (isStreaming) {
        return createStreamMultipleInputNode(group, inputs);
    } else {
        return createBatchMultipleInputNode(group, inputs);
    }
}
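Each collected triple keeps an outside node together with the InputProperty of the group member that consumes it, plus the original edge. A toy sketch, with hypothetical scanNode and joinNode, of the shape gathered above:

// Hypothetical nodes: scanNode sits outside the group, joinNode is a member
// whose first input reads from it.
Tuple3<ExecNode<?>, InputProperty, ExecEdge> input =
        Tuple3.of(
                scanNode,                             // the outside input node
                joinNode.getInputProperties().get(0), // how joinNode wants to read it
                joinNode.getInputEdges().get(0));     // the original connecting edge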
Use of org.apache.flink.table.planner.plan.nodes.exec.InputProperty in project flink by apache.
The class InputPriorityConflictResolver, method resolveInputPriorityConflict:
@Override
protected void resolveInputPriorityConflict(ExecNode<?> node, int higherInput, int lowerInput) {
    ExecNode<?> higherNode = node.getInputEdges().get(higherInput).getSource();
    ExecNode<?> lowerNode = node.getInputEdges().get(lowerInput).getSource();
    final ExecNode<?> newNode;
    if (lowerNode instanceof BatchExecExchange) {
        BatchExecExchange exchange = (BatchExecExchange) lowerNode;
        InputProperty oldProperty = exchange.getInputProperties().get(0);
        InputProperty inputProperty =
                InputProperty.builder()
                        .requiredDistribution(oldProperty.getRequiredDistribution())
                        .priority(oldProperty.getPriority())
                        .damBehavior(getDamBehavior())
                        .build();
        if (isConflictCausedByExchange(higherNode, exchange)) {
            // special case: if exchange is exactly the reuse node,
            // we should split it into two nodes
            BatchExecExchange newExchange =
                    new BatchExecExchange(
                            inputProperty, (RowType) exchange.getOutputType(), "Exchange");
            newExchange.setRequiredExchangeMode(exchangeMode);
            newExchange.setInputEdges(exchange.getInputEdges());
            newNode = newExchange;
        } else {
            // create new BatchExecExchange with the new inputProperty
            BatchExecExchange newExchange =
                    new BatchExecExchange(
                            inputProperty,
                            (RowType) exchange.getOutputType(),
                            exchange.getDescription());
            newExchange.setRequiredExchangeMode(exchangeMode);
            newExchange.setInputEdges(exchange.getInputEdges());
            newNode = newExchange;
        }
    } else {
        newNode = createExchange(node, lowerInput);
    }
    ExecEdge newEdge = ExecEdge.builder().source(newNode).target(node).build();
    node.replaceInputEdge(lowerInput, newEdge);
}
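The key move above is rebuilding the exchange's InputProperty with getDamBehavior(), so the lower-priority side dams its data and the higher-priority input can finish first. A minimal sketch, assuming a hash-distributed input on a hypothetical field 0, of the kind of property the resolver produces:

// Hypothetical rebuilt property: same distribution and priority as before,
// but with BLOCKING dam behavior so this input is fully buffered before
// any output is emitted, breaking the priority conflict.
InputProperty blockingProperty =
        InputProperty.builder()
                .requiredDistribution(InputProperty.hashDistribution(new int[] {0}))
                .priority(0)
                .damBehavior(InputProperty.DamBehavior.BLOCKING)
                .build();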