Use of org.apache.flink.table.planner.plan.nodes.exec.ExecNode in project flink by apache.
Class ForwardHashExchangeProcessor, method process:
@Override
public ExecNodeGraph process(ExecNodeGraph execGraph, ProcessorContext context) {
    if (execGraph.getRootNodes().get(0) instanceof StreamExecNode) {
        throw new TableException("StreamExecNode is not supported yet");
    }
    if (!context.getPlanner().getExecEnv().getConfig().isDynamicGraph()) {
        return execGraph;
    }

    ExecNodeVisitor visitor =
            new AbstractExecNodeExactlyOnceVisitor() {
                @Override
                protected void visitNode(ExecNode<?> node) {
                    visitInputs(node);
                    if (node instanceof CommonExecExchange) {
                        return;
                    }
                    boolean changed = false;
                    List<ExecEdge> newEdges = new ArrayList<>(node.getInputEdges());
                    for (int i = 0; i < node.getInputProperties().size(); ++i) {
                        InputProperty inputProperty = node.getInputProperties().get(i);
                        RequiredDistribution requiredDistribution =
                                inputProperty.getRequiredDistribution();
                        ExecEdge edge = node.getInputEdges().get(i);
                        if (requiredDistribution.getType() == DistributionType.SINGLETON) {
                            if (!hasExchangeInput(edge) && isInputSortedNode(node)) {
                                // if operation chaining is disabled, this makes sure the
                                // sort node and its output can still be connected by a
                                // ForwardPartitioner
                                ExecEdge newEdge =
                                        addExchangeAndReconnectEdge(edge, inputProperty, true);
                                newEdges.set(i, newEdge);
                                changed = true;
                            }
                            continue;
                        }
                        if (requiredDistribution.getType() != DistributionType.HASH) {
                            continue;
                        }
                        if (!hasExchangeInput(edge)) {
                            ExecEdge newEdge;
                            if (isInputSortedNode(node)) {
                                if (hasSortInputForInputSortedNode(node)) {
                                    // add an Exchange with KEEP_INPUT_AS_IS distribution
                                    // as the input of the Sort
                                    ExecNode<?> sort = edge.getSource();
                                    ExecEdge newEdgeOfSort =
                                            addExchangeAndReconnectEdge(
                                                    sort.getInputEdges().get(0),
                                                    inputProperty,
                                                    false);
                                    sort.setInputEdges(
                                            Collections.singletonList(newEdgeOfSort));
                                }
                                // if operation chaining is disabled, this makes sure the
                                // sort node and its output can still be connected by a
                                // ForwardPartitioner
                                newEdge = addExchangeAndReconnectEdge(edge, inputProperty, true);
                            } else {
                                // add an Exchange with KEEP_INPUT_AS_IS distribution as
                                // the input of the node
                                newEdge = addExchangeAndReconnectEdge(edge, inputProperty, false);
                                updateOriginalEdgeInMultipleInput(
                                        node, i, (BatchExecExchange) newEdge.getSource());
                            }
                            // update the edge
                            newEdges.set(i, newEdge);
                            changed = true;
                        } else if (hasSortInputForInputSortedNode(node)) {
                            // if operation chaining is disabled, this makes sure the sort
                            // node and its output can still be connected by a
                            // ForwardPartitioner
                            ExecEdge newEdge =
                                    addExchangeAndReconnectEdge(edge, inputProperty, true);
                            newEdges.set(i, newEdge);
                            changed = true;
                        }
                    }
                    if (changed) {
                        node.setInputEdges(newEdges);
                    }
                }
            };
    execGraph.getRootNodes().forEach(s -> s.accept(visitor));
    return execGraph;
}
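The snippet relies on the private helper addExchangeAndReconnectEdge, which is not shown above. Below is a hedged sketch of the idea only, not the actual Flink implementation: the helper inserts a BatchExecExchange whose required distribution is a KEEP_INPUT_AS_IS wrapper around the original one, so the exchange preserves the source's data distribution and can be chained to it via a ForwardPartitioner. The factory createExchange and the exact builder calls are illustrative assumptions.

// Illustrative sketch only; the real private helper in ForwardHashExchangeProcessor
// differs in detail. createExchange(...) is a hypothetical factory standing in for
// the actual BatchExecExchange construction.
private ExecEdge addExchangeAndReconnectEdge(
        ExecEdge edge, InputProperty inputProperty, boolean strict) {
    ExecNode<?> source = edge.getSource();
    // Assumption: wrap the required distribution in KEEP_INPUT_AS_IS so the new
    // exchange keeps the source's distribution instead of reshuffling.
    InputProperty exchangeInput =
            InputProperty.builder()
                    .requiredDistribution(
                            InputProperty.keepInputAsIsDistribution(
                                    inputProperty.getRequiredDistribution(), strict))
                    .damBehavior(inputProperty.getDamBehavior())
                    .priority(inputProperty.getPriority())
                    .build();
    BatchExecExchange exchange = createExchange(source, exchangeInput); // hypothetical
    // rewire: source -> exchange -> original target
    ExecEdge toExchange = ExecEdge.builder().source(source).target(exchange).build();
    exchange.setInputEdges(Collections.singletonList(toExchange));
    return ExecEdge.builder().source(exchange).target(edge.getTarget()).build();
}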
Use of org.apache.flink.table.planner.plan.nodes.exec.ExecNode in project flink by apache.
Class InputPriorityGraphGenerator, method createTopologyGraph:
protected void createTopologyGraph() {
    // build an initial topology graph
    graph = new TopologyGraph(roots, boundaries);

    // check and resolve conflicts about input priorities
    AbstractExecNodeExactlyOnceVisitor inputPriorityVisitor =
            new AbstractExecNodeExactlyOnceVisitor() {
                @Override
                protected void visitNode(ExecNode<?> node) {
                    if (!boundaries.contains(node)) {
                        visitInputs(node);
                    }
                    updateTopologyGraph(node);
                }
            };
    roots.forEach(n -> n.accept(inputPriorityVisitor));
}
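All the traversals on this page extend AbstractExecNodeExactlyOnceVisitor, which guarantees that each node in the DAG is processed once even when it is reachable through several edges. A minimal sketch of how such a visitor works, condensed from the Flink class of the same name (details may differ across versions):

// Visits every node of an ExecNode DAG exactly once by remembering what has
// already been seen; subclasses implement visitNode and may call visitInputs
// to recurse into upstream nodes.
public abstract class AbstractExecNodeExactlyOnceVisitor implements ExecNodeVisitor {
    private final Set<ExecNode<?>> visited = new HashSet<>();

    @Override
    public void visit(ExecNode<?> node) {
        if (visited.contains(node)) {
            return; // already handled via another path through the DAG
        }
        visited.add(node);
        visitNode(node);
    }

    protected abstract void visitNode(ExecNode<?> node);

    protected void visitInputs(ExecNode<?> node) {
        node.getInputEdges().forEach(e -> e.getSource().accept(this));
    }
}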
Use of org.apache.flink.table.planner.plan.nodes.exec.ExecNode in project flink by apache.
Class InputPriorityGraphGenerator, method calculatePipelinedAncestors:
/** Find the ancestors by going through PIPELINED edges. */
@VisibleForTesting
List<ExecNode<?>> calculatePipelinedAncestors(ExecNode<?> node) {
    List<ExecNode<?>> ret = new ArrayList<>();
    AbstractExecNodeExactlyOnceVisitor ancestorVisitor =
            new AbstractExecNodeExactlyOnceVisitor() {
                @Override
                protected void visitNode(ExecNode<?> node) {
                    boolean hasAncestor = false;
                    if (!boundaries.contains(node)) {
                        List<InputProperty> inputProperties = node.getInputProperties();
                        for (int i = 0; i < inputProperties.size(); i++) {
                            // we only go through PIPELINED edges
                            if (inputProperties
                                    .get(i)
                                    .getDamBehavior()
                                    .stricterOrEqual(safeDamBehavior)) {
                                continue;
                            }
                            hasAncestor = true;
                            node.getInputEdges().get(i).getSource().accept(this);
                        }
                    }
                    if (!hasAncestor) {
                        ret.add(node);
                    }
                }
            };
    node.accept(ancestorVisitor);
    return ret;
}
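The PIPELINED check above is expressed through DamBehavior.stricterOrEqual rather than an equality test, so any behavior at least as strict as safeDamBehavior stops the traversal. A plausible sketch of that ordering (the enum values match Flink's InputProperty.DamBehavior; treat the method body as an assumption):

// Dam behaviors ordered from least to most strict; stricterOrEqual lets callers
// ask "does this edge dam at least as much as the given threshold?".
public enum DamBehavior {
    PIPELINED,  // records flow through immediately
    END_INPUT,  // output starts only after the input ends
    BLOCKING;   // the input is fully consumed and dammed before any output

    public boolean stricterOrEqual(DamBehavior o) {
        return ordinal() >= o.ordinal();
    }
}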
Use of org.apache.flink.table.planner.plan.nodes.exec.ExecNode in project flink by apache.
Class InputOrderCalculatorTest, method testCalculateInputOrderWithRelatedBoundaries:
@Test
public void testCalculateInputOrderWithRelatedBoundaries() {
    // P = InputProperty.DamBehavior.PIPELINED, B = InputProperty.DamBehavior.BLOCKING
    // P1 = PIPELINED + priority 1
    //
    //      /-----------(P0)-----------\
    // 0 -(P0)-> 1 -(B0)-> 2 -(P0)-> 4 -(P1)-> 5
    //            3 -(P1)--/          6 -(B0)--/
    TestingBatchExecNode[] nodes = new TestingBatchExecNode[7];
    for (int i = 0; i < nodes.length; i++) {
        nodes[i] = new TestingBatchExecNode("TestingBatchExecNode" + i);
    }
    nodes[1].addInput(nodes[0]);
    nodes[2].addInput(
            nodes[1],
            InputProperty.builder().damBehavior(InputProperty.DamBehavior.BLOCKING).build());
    nodes[2].addInput(nodes[3], InputProperty.builder().priority(1).build());
    nodes[4].addInput(nodes[0]);
    nodes[4].addInput(nodes[2]);
    nodes[5].addInput(nodes[4], InputProperty.builder().priority(1).build());
    nodes[5].addInput(
            nodes[6],
            InputProperty.builder().damBehavior(InputProperty.DamBehavior.BLOCKING).build());

    InputOrderCalculator calculator =
            new InputOrderCalculator(
                    nodes[5],
                    new HashSet<>(Arrays.asList(nodes[0], nodes[1], nodes[3], nodes[6])),
                    InputProperty.DamBehavior.BLOCKING);
    Map<ExecNode<?>, Integer> result = calculator.calculate();
    Assert.assertEquals(4, result.size());
    Assert.assertEquals(1, result.get(nodes[0]).intValue());
    Assert.assertEquals(1, result.get(nodes[1]).intValue());
    Assert.assertEquals(2, result.get(nodes[3]).intValue());
    Assert.assertEquals(0, result.get(nodes[6]).intValue());
}
Use of org.apache.flink.table.planner.plan.nodes.exec.ExecNode in project flink by apache.
Class InputPriorityGraphGeneratorTest, method testCalculateBoundedPipelinedAncestors:
@Test
public void testCalculateBoundedPipelinedAncestors() {
    // P = InputProperty.DamBehavior.PIPELINED, E = InputProperty.DamBehavior.END_INPUT
    //
    // 0 -P-> 1 -P-> 2
    //   3 -P-> 4 -E-/
    TestingBatchExecNode[] nodes = new TestingBatchExecNode[5];
    for (int i = 0; i < nodes.length; i++) {
        nodes[i] = new TestingBatchExecNode("TestingBatchExecNode" + i);
    }
    nodes[1].addInput(nodes[0]);
    nodes[2].addInput(nodes[1]);
    nodes[2].addInput(
            nodes[4],
            InputProperty.builder().damBehavior(InputProperty.DamBehavior.END_INPUT).build());
    nodes[4].addInput(nodes[3]);

    TestingInputPriorityConflictResolver resolver =
            new TestingInputPriorityConflictResolver(
                    Collections.singletonList(nodes[2]),
                    new HashSet<>(Collections.singleton(nodes[1])),
                    InputProperty.DamBehavior.END_INPUT);
    List<ExecNode<?>> ancestors = resolver.calculatePipelinedAncestors(nodes[2]);
    Assert.assertEquals(1, ancestors.size());
    Assert.assertTrue(ancestors.contains(nodes[1]));
}